diff --git a/.circleci/config.yml b/.circleci/config.yml index 8c2b443f1e84..36f0774131a5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.10 + - image: cimg/python:3.12.12 working_directory: ~/repo @@ -17,7 +17,8 @@ jobs: build: <<: *defaults steps: - - checkout + - checkout: + method: blobless - run: name: check skip @@ -52,7 +53,7 @@ jobs: - run: name: build NumPy command: | - python3.11 -m venv venv + python3.12 -m venv venv . venv/bin/activate pip install --progress-bar=off -r requirements/test_requirements.txt \ -r requirements/build_requirements.txt \ @@ -110,7 +111,8 @@ jobs: deploy: <<: *defaults steps: - - checkout + - checkout: + method: blobless - attach_workspace: at: ~/repo diff --git a/.cirrus.star b/.cirrus.star index c503f25720a7..3de5ce97b0e8 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -9,17 +9,12 @@ load("cirrus", "env", "fs", "http") def main(ctx): ###################################################################### - # Should wheels be built? # Only test on the numpy/numpy repository ###################################################################### if env.get("CIRRUS_REPO_FULL_NAME") != "numpy/numpy": return [] - # only run the wheels entry on a cron job - if env.get("CIRRUS_CRON", "") == "nightly": - return fs.read("tools/ci/cirrus_wheels.yml") - # Obtain commit message for the event. Unfortunately CIRRUS_CHANGE_MESSAGE # only contains the actual commit message on a non-PR trigger event. # For a PR event it contains the PR title and description. @@ -31,23 +26,10 @@ def main(ctx): if "[skip cirrus]" in commit_msg or "[skip ci]" in commit_msg: return [] - wheel = False labels = env.get("CIRRUS_PR_LABELS", "") pr_number = env.get("CIRRUS_PR", "-1") tag = env.get("CIRRUS_TAG", "") - if "[wheel build]" in commit_msg: - wheel = True - - # if int(pr_number) > 0 and ("14 - Release" in labels or "36 - Build" in labels): - # wheel = True - - if tag.startswith("v") and "dev0" not in tag: - wheel = True - - if wheel: - return fs.read("tools/ci/cirrus_wheels.yml") - if int(pr_number) < 0: return [] diff --git a/.clang-format b/.clang-format index 034478ae2466..7e94a6fdb47c 100644 --- a/.clang-format +++ b/.clang-format @@ -27,6 +27,7 @@ IncludeCategories: Priority: 1 - Regex: '^<[[:alnum:]_.]+"' Priority: 2 +IndentPPDirectives: AfterHash Language: Cpp PointerAlignment: Right ReflowComments: true diff --git a/.github/check-warnings/action.yml b/.github/check-warnings/action.yml new file mode 100644 index 000000000000..f3f6778e229b --- /dev/null +++ b/.github/check-warnings/action.yml @@ -0,0 +1,38 @@ +name: "Check Warnings" +description: "Filter build warnings against an allowlist" + +inputs: + log-file: + description: "Path to build log file" + required: true + allowlist: + description: "Path to allowed warnings regex file" + required: true + warning-regex: + description: "Regex to extract warnings from the log" + required: true + +runs: + using: "composite" + steps: + - name: Extract warnings + shell: bash + run: | + echo "Extracting warnings from ${{ inputs.log-file }} using regex: ${{ inputs['warning-regex'] }}" + grep -E "${{ inputs['warning-regex'] }}" "${{ inputs.log-file }}" | tee warnings.log || true + + if [ ! -s warnings.log ]; then + echo "No warnings found." 
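+          # warnings.log is empty here: the grep above matched nothing, so
+          # there are no warnings to screen against the allowlist and the
+          # check can pass early.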
+ exit 0 + fi + + echo "Filtering against allowlist ${{ inputs.allowlist }}" + grep -v -F -f "${{ inputs.allowlist }}" warnings.log | tee disallowed.log || true + + if [ -s disallowed.log ]; then + echo "::error::Disallowed warnings detected:" + cat disallowed.log + exit 1 + else + echo "All warnings are allowed." + fi diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt new file mode 100644 index 000000000000..4cc6e6ab2124 --- /dev/null +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -0,0 +1,21 @@ +../numpy/_core/src/common/npy_cpu_features.c(451): warning C4098: 'npy__cpu_cpuid': 'void' function returning a value +../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/random/src/mt19937/mt19937.c(88): warning C4146: unary minus operator applied to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(92): warning C4146: unary minus operator applied to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(95): warning C4146: unary minus operator applied to unsigned type, result still unsigned +..\numpy\random\src/pcg64/pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +C:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +D:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26329): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38353): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26329): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38353): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data diff --git a/.github/meson_actions/action.yml b/.github/meson_actions/action.yml index 66868cbc3be0..476c0bbd7950 100644 --- 
a/.github/meson_actions/action.yml
+++ b/.github/meson_actions/action.yml
@@ -30,8 +30,7 @@ runs:
        TERM: xterm-256color
      run: |
        echo "::group::Installing Test Dependencies"
-       pip install pytest pytest-xdist pytest-timeout hypothesis typing_extensions
-       pip install -r requirements/setuptools_requirement.txt
+       python -m pip install -r requirements/test_requirements.txt
        echo "::endgroup::"
        echo "::group::Test NumPy"
        spin test -- --durations=10 --timeout=600
diff --git a/.github/windows_arm64_steps/action.yml b/.github/windows_arm64_steps/action.yml
new file mode 100644
index 000000000000..8ecb3b8a0cdd
--- /dev/null
+++ b/.github/windows_arm64_steps/action.yml
@@ -0,0 +1,22 @@
+name: Build Dependencies (Win-ARM64)
+description: "Setup LLVM for Win-ARM64 builds"
+
+runs:
+  using: "composite"
+  steps:
+    - name: Install LLVM with checksum verification
+      shell: pwsh
+      run: |
+        Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe
+        $expectedHash = "92f69a1134e32e54b07d51c6e24d9594852f6476f32c3d70471ae00fffc2d462"
+        $fileHash = (Get-FileHash -Path "LLVM-woa64.exe" -Algorithm SHA256).Hash
+        if ($fileHash -ne $expectedHash) {
+          Write-Error "Checksum verification failed. The downloaded file may be corrupted or tampered with."
+          exit 1
+        }
+        Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait
+        echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+        echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+        echo "CXX=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+        echo "FC=flang-new" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+
diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml
index 3c84ce3c6890..eafe61098588 100644
--- a/.github/workflows/circleci.yml
+++ b/.github/workflows/circleci.yml
@@ -17,7 +17,7 @@ jobs:
      statuses: write
    steps:
      - name: GitHub Action step
-        uses: larsoner/circleci-artifacts-redirector-action@7eafdb60666f57706a5525a2f5eb76224dc8779b # master
+        uses: larsoner/circleci-artifacts-redirector-action@5d358ff96e96429a5c64a969bb4a574555439f4f # master
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          api-token: ${{ secrets.CIRCLE_TOKEN }}
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index fb0dd766a1d8..aaa14b37588a 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -41,13 +41,13 @@ jobs:

    steps:
      - name: Checkout repository
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
-        uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
+        uses: github/codeql-action/init@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
@@ -57,7 +57,7 @@ jobs:
      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
-        uses: github/codeql-action/autobuild@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
+        uses: github/codeql-action/autobuild@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8

      # ℹī¸ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

@@ -70,6 +70,6 @@ jobs:
      #     ./location_of_script_within_repo/buildscript.sh

      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
+        uses: github/codeql-action/analyze@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8
        with:
          category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml
index 0581f7fc591b..8ad89759c906 100644
--- a/.github/workflows/compiler_sanitizers.yml
+++ b/.github/workflows/compiler_sanitizers.yml
@@ -8,6 +8,10 @@ on:
    branches:
      - main
      - maintenance/**
+    paths-ignore:
+      - '**.pyi'
+      - '**.md'
+      - '**.rst'

defaults:
  run:
@@ -21,12 +25,12 @@ permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
-  clang_ASAN:
+  clang_ASAN_UBSAN:
    # To enable this workflow on a fork, comment out:
    if: github.repository == 'numpy/numpy'
    runs-on: macos-latest
    steps:
-    - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+    - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        submodules: recursive
        fetch-tags: true
@@ -45,16 +49,16 @@ jobs:
        pyenv --version
    - name: Set up LLVM
      run: |
-        brew install llvm@19
-        LLVM_PREFIX=$(brew --prefix llvm@19)
+        brew install llvm@20
+        LLVM_PREFIX=$(brew --prefix llvm@20)
        echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV
        echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV
        echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV
        echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV
    - name: Build Python with address sanitizer
      run: |
-        CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14t
-        pyenv global 3.14t
+        CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14
+        pyenv global 3.14
    - name: Install dependencies
      run: |
        pip install -r requirements/build_requirements.txt
@@ -64,12 +68,14 @@ jobs:
        pip uninstall -y pytest-xdist
    - name: Build
      run:
-        python -m spin build -j2 -- -Db_sanitize=address
+        python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false
    - name: Test
      run: |
        # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them
+        # Ignore test_casting_floatingpoint_errors on macOS for now - causes crash inside UBSAN
        ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \
-        python -m spin test -- -v -s --timeout=600 --durations=10
+        UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_arm64.txt \
+        python -m spin test -- -k "not test_casting_floatingpoint_errors" -v -s --timeout=600 --durations=10

  clang_TSAN:
    # To enable this workflow on a fork, comment out:
@@ -78,9 +84,9 @@ jobs:
    container:
      image: ghcr.io/nascheme/numpy-tsan:3.14t
      options: --shm-size=2g # increase memory for large matrix ops
-
+
    steps:
-    - uses: actions/checkout@v4
+    - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
    - name: Trust working directory and initialize submodules
      run: |
        git config --global --add safe.directory /__w/numpy/numpy
@@ -88,8 +94,11 @@
    - name: Uninstall pytest-xdist (conflicts with TSAN)
      run: pip uninstall -y pytest-xdist

+    - name: Upgrade spin (gh-29777)
+      run: pip install -U spin
+
    - name: Build NumPy with ThreadSanitizer
-      run: python -m spin build -j2 -- -Db_sanitize=thread
+      run: python -m spin build -- -Db_sanitize=thread

    - name: Run tests under prebuilt TSAN container
      run: |
diff --git 
a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 174d04efb567..a8bef06a5f5c 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -4,6 +4,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -18,7 +22,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -63,7 +67,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 5036a94ce399..a4bec8af6d82 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,10 +15,10 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1 + uses: actions/dependency-review-action@3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261 # v4.8.2 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index fea77068e128..099f5f67336b 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -5,25 +5,13 @@ on: branches: - main - maintenance/** - # Note: this workflow gets triggered on the same schedule as the - # wheels.yml workflow to upload WASM wheels to Anaconda.org. - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" - workflow_dispatch: - inputs: - push_wheels: - # Can be 'true' or 'false'. Default is 'false'. - # Warning: this will overwrite existing wheels. 
- description: > - Push wheels to Anaconda.org if the build succeeds - required: false - default: 'false' + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + +permissions: + contents: read # to fetch code (actions/checkout) env: FORCE_COLOR: 3 @@ -35,51 +23,19 @@ concurrency: jobs: build-wasm-emscripten: - permissions: - contents: read # to fetch code (actions/checkout) - name: Build NumPy distribution for Pyodide + name: Pyodide test runs-on: ubuntu-22.04 # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # 2.23.3 + - uses: pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 env: CIBW_PLATFORM: pyodide - - - name: Upload wheel artifact(s) - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: cp312-pyodide_wasm32 - path: ./wheelhouse/*.whl - if-no-files-found: error - - # Push to https://anaconda.org/scientific-python-nightly-wheels/numpy - # WARNING: this job will overwrite any existing WASM wheels. - upload-wheels: - name: Upload NumPy WASM wheels to Anaconda.org - runs-on: ubuntu-22.04 - permissions: {} - needs: [build-wasm-emscripten] - if: >- - (github.repository == 'numpy/numpy') && - (github.event_name == 'workflow_dispatch' && github.event.inputs.push_wheels == 'true') || - (github.event_name == 'schedule') - steps: - - name: Download wheel artifact(s) - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 - with: - path: wheelhouse/ - merge-multiple: true - - - name: Push to Anaconda PyPI index - uses: scientific-python/upload-nightly-action@b36e8c0c10dbcfd2e05bf95f17ef8c14fd708dbf # v0.6.2 - with: - artifacts_path: wheelhouse/ - anaconda_nightly_upload_token: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} + CIBW_BUILD: cp312-* diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml new file mode 100644 index 000000000000..7e75786b0bea --- /dev/null +++ b/.github/workflows/linux-ppc64le.yml @@ -0,0 +1,71 @@ +name: Native ppc64le Linux Test + +on: + pull_request: + branches: + - main + - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + native_ppc64le: + # This job runs only in the main NumPy repository. + # It requires a native ppc64le GHA runner, which is not available on forks. 
# For more details, see: https://github.com/numpy/numpy/issues/29125
+    if: github.repository == 'numpy/numpy'
+    runs-on: ubuntu-24.04-ppc64le-p10
+
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          - name: "GCC"
+            args: "-Dallow-noblas=false"
+          - name: "clang"
+            args: "-Dallow-noblas=false"
+
+    name: "${{ matrix.config.name }}"
+    steps:
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+        with:
+          submodules: recursive
+          fetch-tags: true
+
+      - name: Install dependencies
+        run: |
+          sudo apt update
+          sudo apt install -y python3.12 python3-pip python3-dev ninja-build gfortran \
+            build-essential libopenblas-dev liblapack-dev pkg-config
+          pip install --upgrade pip
+          pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt
+          echo "/home/runner/.local/bin" >> $GITHUB_PATH
+
+      - name: Install clang
+        if: matrix.config.name == 'clang'
+        run: |
+          sudo apt install -y clang
+          echo "CC=clang" >> $GITHUB_ENV
+          echo "CXX=clang++" >> $GITHUB_ENV
+
+      - name: Meson Build
+        run: |
+          spin build -- ${{ matrix.config.args }}
+
+      - name: Meson Log
+        if: always()
+        run: cat build/meson-logs/meson-log.txt
+
+      - name: Run Tests
+        run: |
+          spin test -- --timeout=60 --durations=10
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index dc9ef34db71d..c42e3e95a5d7 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -14,6 +14,10 @@ on:
    branches:
      - main
      - maintenance/**
+    paths-ignore:
+      - '**.pyi'
+      - '**.md'
+      - '**.rst'

defaults:
  run:
@@ -33,14 +37,14 @@ jobs:
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
-    - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+    - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        submodules: recursive
        fetch-depth: 0
        persist-credentials: false
-    - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+    - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
      with:
-        python-version: '3.11'
+        python-version: '3.12'
    - name: Install linter requirements
      run: python -m pip install -r requirements/linter_requirements.txt
@@ -49,6 +53,10 @@
        BASE_REF: ${{ github.base_ref }}
      run: python tools/linter.py

+    - name: Check Python.h is first file included
+      run: |
+        python tools/check_python_h_first.py
+
  smoke_test:
    # To enable this job on a fork, comment out:
@@ -58,43 +66,24 @@ jobs:
    env:
      MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none"
    strategy:
      matrix:
-        version: ["3.11", "3.12", "3.13", "3.14-dev", "3.14t-dev"]
+        version: ["3.12", "3.13", "3.14", "3.14t"]
    steps:
-    - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+    - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        submodules: recursive
        fetch-tags: true
        persist-credentials: false
-    - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+    - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
      with:
        python-version: ${{ matrix.version }}
    - uses: ./.github/meson_actions

-  pypy:
-    needs: [smoke_test]
-    runs-on: ubuntu-latest
-    if: github.event_name != 'push'
-    steps:
-    - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      with:
-        submodules: recursive
-        fetch-tags: true
-        persist-credentials: false
-    - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
-      with:
-        python-version: 'pypy3.11-v7.3.19'
-    - name: Setup using scipy-openblas
-      run: |
-        python -m pip install -r 
requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=32 - - uses: ./.github/meson_actions - debug: needs: [smoke_test] runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -124,14 +113,14 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and test dependencies from PyPI run: | pip install -r requirements/build_requirements.txt @@ -156,63 +145,26 @@ jobs: # TODO: gcov env: PYTHONOPTIMIZE: 2 - - - aarch64_test: - needs: [smoke_test] - if: github.repository == 'numpy/numpy' - runs-on: ubuntu-22.04-arm - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: '3.11' - - - name: Install Python dependencies - run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install -r requirements/test_requirements.txt - python -m pip install -r requirements/ci32_requirements.txt - mkdir -p ./.openblas - python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - - - name: Build - env: - PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas - run: | - spin build - - - name: Test - run: | - spin test -j2 -m full -- --timeout=600 --durations=10 - armhf_test: # Tests NumPy on 32-bit ARM hard-float (armhf) via compatibility mode # running on aarch64 (ARM 64-bit) GitHub runners. 
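    # The arm32v7/ubuntu image executes in the host CPU's 32-bit compatibility
    # mode, and /bin/linux32 sets the process personality so uname reports a
    # 32-bit ARM machine, letting pip and meson select armhf artifacts (see
    # the docker run invocations below).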
needs: [smoke_test] if: github.repository == 'numpy/numpy' - runs-on: ubuntu-22.04-arm + runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - + - name: Creates new container run: | docker run --name the_container --interactive \ - -v $(pwd):/numpy arm32v7/ubuntu:22.04 /bin/linux32 /bin/bash -c " + -v $(pwd):/numpy arm32v7/ubuntu:24.04 /bin/linux32 /bin/bash -c " apt update && - apt install -y ninja-build cmake git python3 python-is-python3 python3-dev python3-pip python3-venv && - python -m pip install -r /numpy/requirements/build_requirements.txt && - python -m pip install -r /numpy/requirements/test_requirements.txt + apt install -y ninja-build cmake git python3 python-is-python3 python3-dev python3-pip python3-venv " docker commit the_container the_container @@ -221,7 +173,12 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build + cd /numpy && + python -m venv venv && + source venv/bin/activate && + python -m pip install -r /numpy/requirements/build_requirements.txt && + python -m pip install -r /numpy/requirements/test_requirements.txt && + spin build '" - name: Meson Log @@ -233,7 +190,7 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin test -m full -- --timeout=600 --durations=10 + cd /numpy && source venv/bin/activate && spin test -m full -- --timeout=600 --durations=10 '" benchmark: @@ -241,19 +198,19 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and benchmarking dependencies run: | sudo apt-get update sudo apt-get install libopenblas-dev ninja-build - pip install asv virtualenv packaging -r requirements/build_requirements.txt + pip install "asv<0.6.5" virtualenv packaging -r requirements/build_requirements.txt - name: Install NumPy run: | spin build -- -Dcpu-dispatch=none @@ -271,7 +228,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest==1.6.0 hypothesis==6.104.1 matplotlib scipy pytz pandas + # pip install -r requirements/doc_requirements.txt -r requirements/test_requirements.txt # spin check-docs -v # spin check-tutorials -v @@ -280,14 +237,14 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + 
python-version: '3.12' - name: Install gfortran and setup OpenBLAS (sdist build) run: | set -xe @@ -308,6 +265,12 @@ jobs: run: | cd tools pytest --pyargs numpy -m "not slow" + - name: Test SWIG binding + run: | + sudo apt update + sudo apt install make swig + pip install setuptools + make -C tools/swig/test test array_api_tests: needs: [smoke_test] @@ -315,23 +278,23 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: repository: data-apis/array-api-tests - ref: 'c48410f96fc58e02eea844e6b7f6cc01680f77ce' # Latest commit as of 2025-04-01 + ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 submodules: 'true' path: 'array-api-tests' persist-credentials: false - name: Set up Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and test dependencies from PyPI run: | python -m pip install -r requirements/build_requirements.txt @@ -346,21 +309,29 @@ jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - pytest array_api_tests -v -c pytest.ini --ci --max-examples=100 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt + pytest array_api_tests -v -c pytest.ini -n 4 --max-examples=1000 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt custom_checks: needs: [smoke_test] runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + repository: numpy/numpy-release + path: numpy-release + persist-credentials: false + - name: Check scipy-openblas version in release pipelines + run: | + python tools/check_openblas_version.py --req-files numpy-release/requirements/openblas_requirements.txt - name: Install build and test dependencies from PyPI run: | pip install -r requirements/build_requirements.txt @@ -379,9 +350,53 @@ jobs: - name: Check for unreachable code paths in Python modules run: | # Need the explicit `bash -c` here because `grep` returns exit code 1 for no matches - bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/,vendored-meson/ | grep 'unreachable'" + bash -c "! vulture . 
--min-confidence 100 --exclude doc/,vendored-meson/ | grep 'unreachable'" - name: Check usage of install_tag run: | rm -rf build-install ./vendored-meson/meson/meson.py install -C build --destdir ../build-install --tags=runtime,python-runtime,devel python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests + + + Linux_Python_312_32bit_full: + name: i686, cp312, full + needs: [smoke_test] + runs-on: ubuntu-latest + container: + # There are few options for i686 images at https://quay.io/organization/pypa, + # use the glibc2.28 one + image: quay.io/pypa/manylinux_2_28_i686 + + steps: + - name: Checkout and initialize submodules + # actions/checkout doesn't work in a container image + run: | + git config --global --add safe.directory $PWD + if [ $GITHUB_EVENT_NAME != pull_request ]; then + git clone --recursive --branch=$GITHUB_REF_NAME https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE + git reset --hard $GITHUB_SHA + else + git clone --recursive https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE + git fetch origin $GITHUB_REF:my_ref_name + git checkout $GITHUB_BASE_REF + git -c user.email="you@example.com" merge --no-commit my_ref_name + fi + git submodule update --init --recursive + + - name: build + run: | + python3.12 -m venv venv + source venv/bin/activate + pip install --upgrade pip + pip install -r requirements/ci32_requirements.txt + pip install -r requirements/test_requirements.txt + + spin config-openblas --with-scipy-openblas=32 + export PKG_CONFIG_PATH=$(pwd)/.openblas + python -m pip install . -v -Csetup-args="-Dallow-noblas=false" + + - name: test + run: | + source venv/bin/activate + cd tools + python -m pytest --pyargs numpy diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 54d217cc12fb..88c5322dd754 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -40,6 +40,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: @@ -65,18 +69,18 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt # Install OpenBLAS if [[ $USE_NIGHTLY_OPENBLAS == "true" ]]; then python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple scipy-openblas32 @@ -85,16 +89,13 @@ jobs: fi mkdir -p ./.openblas python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $GITHUB_ENV - ld_library_path=$(python -c"import scipy_openblas32 as ob32; print(ob32.get_lib_dir())") - echo "LD_LIBRARY_PATH=$ld_library_path" >> $GITHUB_ENV - name: Build shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color run: - spin build -- --werror 
-Dallow-noblas=false + spin build -- --werror -Dallow-noblas=false -Dpkg_config_path=${PWD}/.openblas - name: Check build-internal dependencies run: @@ -106,14 +107,13 @@ jobs: - name: Ensure scipy-openblas run: | set -ex - spin python tools/check_openblas_version.py 0.3.26 + spin python tools/check_openblas_version.py -- --min-version 0.3.30 - name: Test shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color run: | - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin test -j auto -- --timeout=600 --durations=10 @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -135,8 +135,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build (LP64) run: spin build -- -Dblas=openblas -Dlapack=openblas -Ddisable-optimization=true -Dallow-noblas=false @@ -163,7 +162,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -171,8 +170,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: spin build -- -Ddisable-optimization=true -Dallow-noblas=false @@ -194,19 +192,18 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libopenblas-dev cmake sudo apt-get remove pkg-config @@ -217,24 +214,24 @@ jobs: - name: Test run: spin test -j auto -- numpy/linalg --timeout=600 --durations=10 - + netlib-debian: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | - pip install -r 
requirements/build_requirements.txt + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install liblapack-dev pkg-config @@ -244,7 +241,6 @@ jobs: - name: Test run: | - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin test -j auto -- numpy/linalg --timeout=600 --durations=10 @@ -260,7 +256,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -276,7 +272,9 @@ jobs: - name: Test run: | - pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions pytest-timeout + # do not use test_requirements.txt, it includes coverage which requires + # sqlite3, which is not available on OpenSUSE python + pip install --break-system-packages pytest pytest-xdist hypothesis pytest-timeout spin test -j auto -- numpy/linalg --timeout=600 --durations=10 @@ -285,19 +283,19 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt pip install mkl mkl-devel - name: Repair MKL pkg-config files and symlinks @@ -349,19 +347,19 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libblis-dev libopenblas-dev pkg-config @@ -386,19 +384,19 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions 
pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libatlas-base-dev pkg-config @@ -407,4 +405,3 @@ jobs: - name: Test run: spin test -- numpy/linalg - diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml deleted file mode 100644 index 547c031bc84b..000000000000 --- a/.github/workflows/linux_musl.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Test musllinux_x86_64 - -on: - pull_request: - branches: - - main - - maintenance/** - - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - - -permissions: - contents: read # to fetch code (actions/checkout) - - -jobs: - musllinux_x86_64: - runs-on: ubuntu-latest - # To enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' - container: - # Use container used for building musllinux wheels - # it has git installed, all the pythons, etc - image: quay.io/pypa/musllinux_1_2_x86_64 - - steps: - - name: setup - run: | - apk update --quiet - - # using git commands to clone because versioneer doesn't work when - # actions/checkout is used for the clone step in a container - - git config --global --add safe.directory $PWD - - if [ $GITHUB_EVENT_NAME != pull_request ]; then - git clone --recursive --branch=$GITHUB_REF_NAME https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE - git reset --hard $GITHUB_SHA - else - git clone --recursive https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE - git fetch origin $GITHUB_REF:my_ref_name - git checkout $GITHUB_BASE_REF - git -c user.email="you@example.com" merge --no-commit my_ref_name - fi - git submodule update --init - - ln -s /usr/local/bin/python3.11 /usr/local/bin/python - - - name: test-musllinux_x86_64 - env: - PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas - run: | - python -m venv test_env - source test_env/bin/activate - - pip install -r requirements/ci_requirements.txt - pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - - # use meson to build and test - spin build --with-scipy-openblas=64 - spin test -j auto -- --timeout=600 --durations=10 - - - name: Meson Log - shell: bash - run: | - cat build/meson-logs/meson-log.txt diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 1293e9c37c2f..f82bf1aa2626 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -14,6 +14,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' workflow_dispatch: defaults: @@ -38,22 +42,6 @@ jobs: fail-fast: false matrix: BUILD_PROP: - - [ - "ppc64le", - "powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - "ppc64le" - ] - - [ - "ppc64le - baseline(Power9)", - "powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true -Dcpu-baseline=vsx3", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - "ppc64le" - ] - [ "s390x", "s390x-linux-gnu", @@ -91,7 +79,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -108,7 +96,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} 
gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -120,7 +108,9 @@ jobs: docker run --platform=linux/${ARCH} --name the_container --interactive \ -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && - apt install -y cmake git python3 python-is-python3 python3-dev python3-pip && + apt install -y cmake git curl ca-certificates && + curl -LsSf https://astral.sh/uv/install.sh | sh && + export PATH="/root/.local/bin:$PATH" && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && rm -rf /usr/${TOOLCHAIN_NAME} && ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && @@ -136,8 +126,9 @@ jobs: git config --global --add safe.directory /numpy && # No need to build ninja from source, the host ninja is used for the build grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && - python -m pip install -r /tmp/build_requirements.txt && - python -m pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest pytest-xdist hypothesis pytest-timeout rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " docker commit the_container the_container @@ -153,7 +144,7 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build --clean -- ${MESON_OPTIONS} + source .venv/bin/activate && cd /numpy && spin build --clean -- ${MESON_OPTIONS} '" - name: Meson Log @@ -166,7 +157,7 @@ jobs: -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' export F90=/usr/bin/gfortran - cd /numpy && spin test -- --timeout=600 --durations=10 -k \"${RUNTIME_TEST_FILTER}\" + source .venv/bin/activate && cd /numpy && spin test -- --timeout=600 --durations=10 -k \"${RUNTIME_TEST_FILTER}\" '" @@ -198,14 +189,14 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - name: Initialize binfmt_misc for qemu-user-static run: | - docker run --rm --privileged loongcr.lcpu.dev/multiarch/archlinux --reset -p yes + docker run --rm --privileged tonistiigi/binfmt:qemu-v10.0.4-56 --install all - name: Install GCC cross-compilers run: | @@ -213,7 +204,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -228,6 +219,8 @@ jobs: ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && ln -s /host/usr/lib/gcc-cross/${TOOLCHAIN_NAME} /usr/lib/gcc/${TOOLCHAIN_NAME} && + mkdir -p /usr/libexec/gcc && + rm -rf /usr/libexec/gcc/${TOOLCHAIN_NAME} && ln -s 
/host/usr/libexec/gcc/${TOOLCHAIN_NAME} /usr/libexec/gcc/${TOOLCHAIN_NAME} && rm -f /usr/bin/gcc && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gcc-14 /usr/bin/gcc && rm -f /usr/bin/g++ && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-g++-14 /usr/bin/g++ && rm -f /usr/bin/gfortran && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gfortran-14 /usr/bin/gfortran && @@ -237,8 +230,14 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install --break-system-packages -r /numpy/requirements/build_requirements.txt && - python -m pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install --break-system-packages uv --extra-index-url https://mirrors.loong64.com/pypi/simple && + export PATH="/root/.local/bin:$PATH" && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest pytest-xdist hypothesis && + rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " docker commit the_container the_container mkdir -p "~/docker_${TOOLCHAIN_NAME}" @@ -253,7 +252,7 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy/ && spin build --clean -- ${MESON_OPTIONS} + source .venv/bin/activate && cd /numpy/ && spin build --clean -- ${MESON_OPTIONS} '" - name: Meson Log @@ -265,5 +264,5 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" + source .venv/bin/activate && cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" '" diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index a9f065e25cc0..92e46c8053b8 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -7,7 +7,7 @@ name: Linux SIMD tests # # - baseline_only: # Focuses on completing as quickly as possible and acts as a filter for other, more resource-intensive jobs. -# Utilizes only the default baseline targets (e.g., SSE3 on X86_64) without enabling any runtime dispatched features. +# Utilizes only the default baseline targets (e.g., X86_V2 on X86_64) without enabling any runtime dispatched features. # # - old_gcc: # Tests the oldest supported GCC version with default CPU/baseline/dispatch settings. @@ -19,10 +19,6 @@ name: Linux SIMD tests # Tests against the host CPU features set as the baseline without enabling any runtime dispatched features. # Intended to assess the entire NumPy codebase against host flags, even for code sections lacking handwritten SIMD intrinsics. # -# - without_avx512/avx2/fma3: -# Uses runtime SIMD dispatching but disables AVX2, FMA3, and AVX512. -# Intended to evaluate 128-bit SIMD extensions without FMA support. -# # - without_avx512: # Uses runtime SIMD dispatching but disables AVX512. # Intended to evaluate 128-bit/256-bit SIMD extensions. 
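#
# (A sketch for reproducing these variants locally, assuming a checkout with
# the build requirements installed: each one is a plain spin invocation with
# the MESON_ARGS shown below, e.g. `spin build -- -Dallow-noblas=true
# -Dcpu-dispatch=none` for baseline_only, or `spin build --clean --
# -Dallow-noblas=true -Dcpu-dispatch=max-x86_v4` for without_avx512,
# followed by `spin test`.)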
@@ -35,6 +31,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: @@ -58,14 +58,14 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - uses: ./.github/meson_actions name: Build/Test @@ -76,14 +76,14 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install GCC9/10 run: | @@ -123,17 +123,16 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: | spin build -- ${{ matrix.config.args }} @@ -158,17 +157,12 @@ jobs: - [ "native", "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none", - "3.11" + "3.12" ] - [ "without avx512", - "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C,AVX2,FMA3", - "3.11" - ] - - [ - "without avx512/avx2/fma3", - "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C", - "3.11" + "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v4", + "3.12" ] env: @@ -176,12 +170,12 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -191,28 +185,27 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: 
actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/788820/sde-external-9.27.0-2023-09-13-lin.tar.xz + curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build - run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' + run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=X86_V4 -Dtest-simd='BASELINE,AVX512_ICL,AVX512_SPR' - name: Meson Log if: always() @@ -242,25 +235,24 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/788820/sde-external-9.27.0-2023-09-13-lin.tar.xz + curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_spr diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 1388a756d216..15d3cf947222 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -5,7 +5,10 @@ on: branches: - main - maintenance/** - + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' permissions: contents: read # to fetch code (actions/checkout) @@ -22,14 +25,14 @@ jobs: name: macOS x86-64 conda # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' - runs-on: macos-13 + runs-on: macos-15-intel strategy: fail-fast: false matrix: python-version: ["3.12"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -46,7 +49,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: cache-ccache with: 
path: ${{ steps.prep-ccache.outputs.dir }} @@ -55,7 +58,7 @@ ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 + uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge @@ -70,7 +73,7 @@ # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. - name: Cache conda environment - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 @@ -113,31 +116,24 @@ fail-fast: false matrix: build_runner: - - [ macos-13, "macos_x86_64" ] + - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.11", "3.14t-dev"] + version: ["3.12", "3.14t"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.version }} - - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 - if: ${{ matrix.build_runner[0] == 'macos-13' }} - with: - xcode-version: '14.3' - - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install -r requirements/setuptools_requirement.txt - pip install pytest pytest-xdist pytest-timeout hypothesis + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build against Accelerate (LP64) run: spin build -- -Ddisable-optimization=true -Dallow-noblas=false @@ -151,4 +147,11 @@ spin build -- -Duse-ilp64=true -Ddisable-optimization=true -Dallow-noblas=false - name: Test (fast tests) + if: ${{ matrix.version != '3.14t' || matrix.build_runner[0] != 'macos-14' }} run: spin test -j2 -- --timeout=600 --durations=10 + + - name: Test in multiple threads + if: ${{ matrix.version == '3.14t' && matrix.build_runner[0] == 'macos-14' }} + run: | + pip install pytest-run-parallel==0.7.0 + spin test -p 4 -- --timeout=600 --durations=10 diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 36e89504def7..41f421ce3889 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -18,11 +18,17 @@ on: - main - maintenance/** paths-ignore: - - 'benchmarks/' - - '.circlecl/' - - 'docs/' - - 'meson_cpu/' - - 'tools/' + - '**.md' + - '**.rst' + - '.circleci/**' + - '.devcontainer/**' + - '.spin/**' + - 'benchmarks/**' + - 'branding/**' + - 'doc/**' + - 'meson_cpu/**' + - 'tools/**' + - 'vendored-meson/**' workflow_dispatch: defaults: @@ -46,26 +52,30 @@ jobs: fail-fast: false matrix: os_python: - - [ubuntu-latest, '3.12'] - - [windows-latest, '3.11'] - - [macos-latest, '3.11'] + - [macos-latest, '3.14'] + - [ubuntu-latest, '3.13'] + - [windows-latest, '3.12'] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - 
- - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6 with: python-version: ${{ matrix.os_python[1] }} + activate-environment: true + cache-dependency-glob: | + requirements/build_requirements.txt + requirements/test_requirements.txt - name: Install dependencies - run: | - pip install -r requirements/build_requirements.txt - # orjson makes mypy faster but the default requirements.txt - # can't install it because orjson doesn't support 32 bit Linux - pip install orjson - pip install -r requirements/test_requirements.txt + # orjson makes mypy faster but the default requirements.txt + # can't install it because orjson doesn't support 32 bit Linux + run: >- + uv pip install + -r requirements/build_requirements.txt + -r requirements/test_requirements.txt + orjson - name: Build run: | spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index bfbf34fa7817..b0b57cb93d7b 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -4,9 +4,13 @@ on: # Only run on PR, since we diff against main pull_request: paths: - - "**/*.pyi" - ".github/workflows/mypy_primer.yml" - ".github/workflows/mypy_primer_comment.yml" + - "numpy/**/*.pyi" + - "numpy/_typing/*.py" + - "numpy/typing/*.py" + - "!numpy/typing/tests/**" + - "numpy/py.typed" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -24,11 +28,11 @@ jobs: shard-index: [0] # e.g. change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: numpy_to_test fetch-depth: 0 - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.12" - name: Install dependencies @@ -70,7 +74,7 @@ jobs: run: | echo ${{ github.event.pull_request.number }} | tee pr_number.txt - name: Upload mypy_primer diff + PR number - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: ${{ matrix.shard-index == 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -78,7 +82,7 @@ jobs: diff_${{ matrix.shard-index }}.txt pr_number.txt - name: Upload mypy_primer diff - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: ${{ matrix.shard-index != 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -92,7 +96,7 @@ jobs: contents: read steps: - name: Merge artifacts - uses: actions/upload-artifact/merge@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact/merge@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: mypy_primer_diffs pattern: mypy_primer_diffs-* diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index be0dda7f7dec..13eda8c230b3 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -18,7 +18,7 @@ jobs: if: ${{ github.event.workflow_run.conclusion == 'success' }} steps: - name: Download diffs - uses: 
actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const fs = require('fs'); @@ -42,23 +42,23 @@ jobs: - name: Get PR number id: get-pr-number - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const fs = require('fs'); return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: kanga333/comment-hider@c12bb20b48aeb8fc098e35967de8d4f8018fffdf # v0.4.0 + uses: int128/hide-comment-action@9cdf7fd49089308931b20966baee90f4aadb9f6e # v1.48.0 with: - github_token: ${{ secrets.GITHUB_TOKEN }} - issue_number: ${{ steps.get-pr-number.outputs.result }} + token: ${{ secrets.GITHUB_TOKEN }} + issue-number: ${{ steps.get-pr-number.outputs.result }} - run: cat diff_*.txt | tee fulldiff.txt - name: Post comment id: post-comment - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 11a5be5f488a..421790b4ae1e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,12 +25,12 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v3.1.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: results.sarif results_format: sarif @@ -42,7 +42,7 @@ # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: SARIF file path: results.sarif @@ -50,6 +50,6 @@ # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v2.1.27 + uses: github/codeql-action/upload-sarif@1b168cd39490f61582a9beae412bb7057a6b2c4e # v2.1.27 with: sarif_file: results.sarif diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml new file mode 100644 index 000000000000..526471f799c7 --- /dev/null +++ b/.github/workflows/stubtest.yml @@ -0,0 +1,64 @@ +name: stubtest +permissions: read-all + +# Stubtest depends on different branches and paths than mypy does, so we have a separate workflow. 
+ +on: + pull_request: + branches: + - "main" + - "maintenance/2.**" + # Stubtest requires numpy>=2.4 + - "!maintenance/2.[0-3].x" + paths: + - ".github/workflows/stubtest.yml" + - "numpy/**" + - "!numpy/**/tests/**" + - "requirements/test_requirements.txt" + - "tools/stubtest/**" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + mypy: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + + name: stubtest + runs-on: ${{ matrix.os }}-latest + strategy: + fail-fast: false + matrix: + # TODO: consider including macos and windows + os: [ubuntu] + py: ["3.12", "3.14"] + + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6 + with: + python-version: ${{ matrix.py }} + activate-environment: true + cache-dependency-glob: | + requirements/build_requirements.txt + requirements/test_requirements.txt + + - name: uv pip install + run: >- + uv pip install + -r requirements/build_requirements.txt + -r requirements/test_requirements.txt + + - name: spin build + run: spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv + + - name: spin stubtest + run: spin stubtest diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e96021775f3c..27ee88d80677 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -1,34 +1,18 @@ -# Workflow to build and test wheels. -# To work on the wheel building infrastructure on a fork, comment out: +# Workflow to build and test wheels, similarly to numpy/numpy-release. +# To work on these jobs in a fork, comment out: # -# if: github.repository == 'numpy/numpy' -# -# in the get_commit_message job. Be sure to include [wheel build] in your commit -# message to trigger the build. All files related to wheel building are located -# at tools/wheels/ -# Alternatively, you can add labels to the pull request in order to trigger wheel -# builds. -# The labels that trigger builds are: -# 36 - Build(for changes to the building process, -# 14 - Release(ensure wheels build before release) +# if: github.repository == 'numpy/numpy' name: Wheel builder on: - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" pull_request: branches: - main - maintenance/** - push: - tags: - - v* + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' workflow_dispatch: concurrency: @@ -39,42 +23,12 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - get_commit_message: - name: Get commit message - runs-on: ubuntu-latest - # Only workflow_dispatch is enabled on forks. 
- # To enable this job and subsequent jobs on a fork for other events, comment out: - if: github.repository == 'numpy/numpy' || github.event_name == 'workflow_dispatch' - outputs: - message: ${{ steps.commit_message.outputs.message }} - steps: - - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - # Gets the correct commit message for pull request - with: - ref: ${{ github.event.pull_request.head.sha }} - persist-credentials: false - - name: Get commit message - id: commit_message - env: - HEAD: ${{ github.ref }} - run: | - set -xe - COMMIT_MSG=$(git log --no-merges -1 --oneline) - echo "message=$COMMIT_MSG" >> $GITHUB_OUTPUT - echo github.ref "$HEAD" - build_wheels: name: Build wheel ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} - needs: get_commit_message - if: >- - contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && ( ! endsWith(github.ref, 'dev0'))) + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' runs-on: ${{ matrix.buildplat[0] }} strategy: - # Ensure that a wheel builder finishes even if another fails fail-fast: false matrix: # Github Actions doesn't support pairing matrix values together, let's improvise @@ -84,37 +38,17 @@ jobs: - [ubuntu-22.04, musllinux_x86_64, ""] - [ubuntu-22.04-arm, manylinux_aarch64, ""] - [ubuntu-22.04-arm, musllinux_aarch64, ""] - - [macos-13, macosx_x86_64, openblas] - - # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile - - [macos-13, macosx_x86_64, accelerate] - - [macos-14, macosx_arm64, accelerate] # always use accelerate - - [windows-2019, win_amd64, ""] - - [windows-2019, win32, ""] + - [macos-15-intel, macosx_x86_64, openblas] + - [macos-14, macosx_arm64, openblas] + - [windows-2022, win_amd64, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311", "cp312", "cp313", "cp313t", "pp311"] - exclude: - # Don't build PyPy 32-bit windows - - buildplat: [windows-2019, win32, ""] - python: "pp311" - # Don't build PyPy arm64 windows - - buildplat: [windows-11-arm, win_arm64, ""] - python: "pp311" - # No PyPy on musllinux images - - buildplat: [ ubuntu-22.04, musllinux_x86_64, "" ] - python: "pp311" - - buildplat: [ ubuntu-22.04-arm, musllinux_aarch64, "" ] - python: "pp311" - - buildplat: [ macos13, macosx_x86_64, openblas ] - python: "cp313t" + python: ["cp312"] env: - IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} - IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} - IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} + IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: true persist-credentials: false @@ -125,13 +59,12 @@ jobs: with: architecture: 'x86' - - name: Setup MSVC arm64 + - name: Setup LLVM for Windows ARM64 if: ${{ matrix.buildplat[1] == 'win_arm64' }} - uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 - with: - architecture: 'arm64' + uses: ./.github/windows_arm64_steps - name: pkg-config-for-win + if: runner.os == 'windows' run: | choco install -y --no-progress 
--stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite $CIBW = "${{ github.workspace }}/.openblas" @@ -141,15 +74,9 @@ jobs: # passed through, so convert it to '/' $CIBW = $CIBW.replace("\","/") echo "CIBW_ENVIRONMENT_WINDOWS=PKG_CONFIG_PATH=$CIBW" >> $env:GITHUB_ENV - if: runner.os == 'windows' - - # Used to push the built wheels - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: "3.x" - name: Setup macOS - if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' + if: matrix.buildplat[0] == 'macos-15-intel' || matrix.buildplat[0] == 'macos-14' run: | # Needed due to https://github.com/actions/runner-images/issues/3371 # Supported versions: https://github.com/actions/runner-images/blob/main/images/macos/macos-14-arm64-Readme.md @@ -161,10 +88,6 @@ jobs: # only target Sonoma onwards CIBW="MACOSX_DEPLOYMENT_TARGET=14.0 INSTALL_OPENBLAS=false RUNNER_OS=macOS" echo "CIBW_ENVIRONMENT_MACOS=$CIBW" >> "$GITHUB_ENV" - - # the macos-13 image that's used for building the x86_64 wheel can't test - # a wheel with deployment target >= 14 without further work - echo "CIBW_TEST_SKIP=*-macosx_x86_64" >> "$GITHUB_ENV" else # macosx_x86_64 with OpenBLAS # if INSTALL_OPENBLAS isn't specified then scipy-openblas is automatically installed @@ -175,123 +98,11 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # v2.23.3 + uses: pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc - if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - - name: Upload wheels - if: success() && github.repository == 'numpy/numpy' - shell: bash -el {0} - # see https://github.com/marketplace/actions/setup-miniconda for why - # `-el {0}` is required. - env: - NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). 
- # Tags will upload to - # https://anaconda.org/multibuild-wheels-staging/numpy - # The tokens were originally generated at anaconda.org - upload_wheels - - build_sdist: - name: Build sdist - needs: get_commit_message - if: >- - contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'pull_request' && - (contains(github.event.pull_request.labels.*.name, '36 - Build') || - contains(github.event.pull_request.labels.*.name, '14 - Release'))) || - (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && ( ! endsWith(github.ref, 'dev0'))) - runs-on: ubuntu-latest - env: - IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} - # commented out so the sdist doesn't upload to nightly - # IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} - steps: - - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - submodules: true - persist-credentials: false - # Used to push the built wheels - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - # Build sdist on lowest supported Python - python-version: "3.11" - - name: Build sdist - run: | - python -m pip install -U pip build - python -m build --sdist -Csetup-args=-Dallow-noblas=true - - name: Test the sdist - run: | - # TODO: Don't run test suite, and instead build wheels from sdist - # Depends on pypa/cibuildwheel#1020 - python -m pip install dist/*.gz -Csetup-args=-Dallow-noblas=true - pip install -r requirements/test_requirements.txt - cd .. # Can't import numpy within numpy src directory - python -c "import numpy, sys; print(numpy.__version__); sys.exit(numpy.test() is False)" - - - name: Check README rendering for PyPI - run: | - python -mpip install twine - twine check dist/* - - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: sdist - path: ./dist/* - - - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # default (and activated) environment name is test - # Note that this step is *after* specific pythons have been used to - # build and test - auto-update-conda: true - python-version: "3.11" - - - name: Upload sdist - if: success() && github.repository == 'numpy/numpy' - shell: bash -el {0} - env: - NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - # commented out so the sdist doesn't upload to nightly - # NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - conda install -y anaconda-client - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). 
- # Tags will upload to - # https://anaconda.org/multibuild-wheels-staging/numpy - # The tokens were originally generated at anaconda.org - upload_wheels diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index f61419df09cd..3ef295cc8f5b 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -5,6 +5,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -14,30 +18,23 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - python64bit_openblas: - name: x86-64, LP64 OpenBLAS - runs-on: windows-2019 + clangcl_python64bit_openblas32: + name: Clang-cl, x86-64, fast, openblas32 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' - strategy: - fail-fast: false - matrix: - compiler-pyversion: - - ["MSVC", "3.11"] - - ["Clang-cl", "3.14t-dev"] - steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Setup Python - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: ${{ matrix.compiler-pyversion[1] }} + python-version: "3.14t" - name: Install build dependencies from PyPI run: | @@ -48,23 +45,7 @@ jobs: choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV - - - name: Install Clang-cl - if: matrix.compiler-pyversion[0] == 'Clang-cl' - run: | - # llvm is preinstalled, but leave - # this here in case we need to pin the - # version at some point. 
- #choco install llvm -y - - - name: Install NumPy (MSVC) - if: matrix.compiler-pyversion[0] == 'MSVC' - run: | - pip install -r requirements/ci_requirements.txt - spin build --with-scipy-openblas=32 -j2 -- --vsenv - - name: Install NumPy (Clang-cl) - if: matrix.compiler-pyversion[0] == 'Clang-cl' run: | "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-build.ini -Encoding ascii pip install -r requirements/ci_requirements.txt @@ -85,14 +66,16 @@ jobs: run: | spin test -- --timeout=600 --durations=10 - msvc_python_no_openblas: - name: MSVC, ${{ matrix.architecture }} Python , no BLAS + + #======================================================================================= + msvc_python32bit_no_openblas: + name: MSVC, ${{ matrix.architecture }}, fast, no BLAS runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: - - os: windows-2019 + - os: windows-2022 architecture: x86 - os: windows-11-arm architecture: arm64 @@ -100,16 +83,16 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' architecture: ${{ matrix.architecture }} - name: Setup MSVC @@ -119,7 +102,14 @@ jobs: - name: Build and install run: | - python -m pip install . -v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" + python -m pip install . -v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" 2>&1 | tee build.log + + - name: Check warnings + uses: ./.github/check-warnings + with: + log-file: ./build.log + allowlist: ./.github/check-warnings/msvc-allowed-warnings.txt + warning-regex: "warning C|Command line warning" - name: Install test dependencies run: | @@ -129,3 +119,59 @@ jobs: run: | cd tools python -m pytest --pyargs numpy -m "not slow" -n2 --timeout=600 --durations=10 + + #======================================================================================= + msvc_python64bit_openblas: + name: MSVC, x86_64, ${{ matrix.TEST_MODE }}, openblas${{ matrix.BLAS }} + runs-on: windows-2022 + strategy: + fail-fast: false + matrix: + include: + - BLAS: 64 + TEST_MODE: full + pyver: '3.14' + - BLAS: 32 + TEST_MODE: fast + pyver: '3.12' + + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' + steps: + - name: Checkout + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Setup Python + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + with: + python-version: ${{ matrix.pyver }} + + - name: pkg-config + run: | + choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + + - name: Dependencies + run: | + python -m pip install -r requirements/test_requirements.txt + + - name: Build and install + run: | + pip install -r requirements/ci_requirements.txt + spin config-openblas --with-scipy-openblas=${{ matrix.BLAS }} + $env:PKG_CONFIG_PATH="$pwd/.openblas" + python -m pip install . 
-v -Csetup-args="--vsenv" -Csetup-args="-Dallow-noblas=false" + + - name: Run test suite ${{ matrix.TEST_MODE }} + run: | + cd tools + # Get a gfortran onto the path for f2py tests + $env:PATH = "c:\\rtools45\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" + If ( "${{ matrix.TEST_MODE }}" -eq "full" ) { + python -m pytest --pyargs numpy -rsx -n2 --durations=10 + } else { + python -m pytest --pyargs numpy -m "not slow" -n2 --timeout=600 --durations=10 -rsx + } diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml deleted file mode 100644 index 0a691bff9b21..000000000000 --- a/.github/workflows/windows_arm64.yml +++ /dev/null @@ -1,208 +0,0 @@ -name: Windows Arm64 - -on: - workflow_dispatch: - -env: - python_version: 3.12 - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - windows_arm: - runs-on: windows-2019 - - # To enable this job on a fork, comment out: - if: github.repository == 'numpy/numpy' - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: ${{env.python_version}} - architecture: x64 - - - name: Install build dependencies from PyPI - run: | - python -m pip install -r requirements/build_requirements.txt - - - name: Prepare python - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Detecting python location and version - $PythonDir = (Split-Path -Parent (get-command python).Path) - $PythonVersionParts = ( -split (python -V)) - $PythonVersion = $PythonVersionParts[1] - - #Downloading the package for appropriate python version from nuget - $PythonARM64NugetLink = "https://www.nuget.org/api/v2/package/pythonarm64/$PythonVersion" - $PythonARM64NugetZip = "nuget_python.zip" - $PythonARM64NugetDir = "temp_nuget" - Invoke-WebRequest $PythonARM64NugetLink -OutFile $PythonARM64NugetZip - - #Changing the libs folder to enable python libraries to be linked for arm64 - Expand-Archive $PythonARM64NugetZip $PythonARM64NugetDir - Copy-Item $PythonARM64NugetDir\tools\libs\* $PythonDir\libs - Remove-Item -Force -Recurse $PythonARM64NugetDir - Remove-Item -Force $PythonARM64NugetZip - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Prepare Licence - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - $CurrentDir = (get-location).Path - $LicenseFile = "$CurrentDir\LICENSE.txt" - Set-Content $LicenseFile ([Environment]::NewLine) - Add-Content $LicenseFile "----" - Add-Content $LicenseFile ([Environment]::NewLine) - Add-Content $LicenseFile (Get-Content "$CurrentDir\LICENSES_bundled.txt") - Add-Content $LicenseFile (Get-Content "$CurrentDir\tools\wheels\LICENSE_win32.txt") - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Wheel build - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Creating cross compile script for messon subsystem - $CurrentDir = (get-location) - $CrossScript = "$CurrentDir\arm64_w64.txt" - $CrossScriptContent = - { - [host_machine] - system = 'windows' - subsystem = 'windows' - kernel = 'nt' - cpu_family = 'aarch64' - cpu = 'aarch64' - endian = 'little' - - [binaries] - c='cl.exe' - cpp = 'cl.exe' - - [properties] - sizeof_short 
= 2 - sizeof_int = 4 - sizeof_long = 4 - sizeof_long_long = 8 - sizeof_float = 4 - sizeof_double = 8 - sizeof_long_double = 8 - sizeof_size_t = 8 - sizeof_wchar_t = 2 - sizeof_off_t = 4 - sizeof_Py_intptr_t = 8 - sizeof_PY_LONG_LONG = 8 - longdouble_format = 'IEEE_DOUBLE_LE' - } - Set-Content $CrossScript $CrossScriptContent.ToString() - - #Setting up cross compilers from MSVC - $Products = 'Community', 'Professional', 'Enterprise', 'BuildTools' | % { "Microsoft.VisualStudio.Product.$_" } - $VsInstallPath = (vswhere -products $Products -latest -format json | ConvertFrom-Json).installationPath - $VSVars = (Get-ChildItem -Path $VsInstallPath -Recurse -Filter "vcvarsamd64_arm64.bat").FullName - $ScriptingObj = New-Object -ComObject Scripting.FileSystemObject - $VSVarsShort = $ScriptingObj.GetFile($VSVars).ShortPath - cmd /c "$VSVarsShort && set" | - ForEach-Object { - if ($_ -match "=") { - $Var = $_.split("=") - set-item -force -path "ENV:\$($Var[0])" -value "$($Var[1])" - } - } - - #Building the wheel - pip wheel . --config-settings=setup-args="--cross-file=$CrossScript" - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Fix wheel - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Finding whl file - $CurrentDir = (get-location) - $WhlName = ((Get-ChildItem -Filter "*.whl").FullName) - $ZipWhlName = "$CurrentDir\ZipWhlName.zip" - $UnzippedWhl = "$CurrentDir\unzipedWhl" - - #Expanding whl file - Rename-Item -Path $WhlName $ZipWhlName - if (Test-Path $UnzippedWhl) { - Remove-Item -Force -Recurse $UnzippedWhl - } - Expand-Archive -Force -Path $ZipWhlName $UnzippedWhl - - #Renaming all files to show that their arch is arm64 - Get-ChildItem -Recurse -Path $UnzippedWhl *win_amd64* | Rename-Item -NewName { $_.Name -replace 'win_amd64', 'win_arm64' } - $DIST_DIR = (Get-ChildItem -Recurse -Path $UnzippedWhl *dist-info).FullName - - #Changing amd64 references from metafiles - (GET-Content $DIST_DIR/RECORD) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/RECORD - (GET-Content $DIST_DIR/WHEEL) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/WHEEL - - #Packing whl file - Compress-Archive -Path $UnzippedWhl\* -DestinationPath $ZipWhlName -Force - $WhlName = $WhlName.Replace("win_amd64", "win_arm64") - Rename-Item -Path $ZipWhlName $WhlName - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Upload Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: ${{ env.python_version }}-win_arm64 - path: ./*.whl - - - name: Setup Mamba - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - # - name: Upload wheels - # if: success() - # shell: bash -el {0} - # # see https://github.com/marketplace/actions/setup-miniconda for why - # # `-el {0}` is required. 
- # env: - # NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - # NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - # run: | - # source tools/wheels/upload_wheels.sh - # set_upload_vars - # # trigger an upload to - # # https://anaconda.org/scientific-python-nightly-wheels/numpy - # # for cron jobs or "Run workflow" (restricted to main branch). - # # Tags will upload to - # # https://anaconda.org/multibuild-wheels-staging/numpy - # # The tokens were originally generated at anaconda.org - # upload_wheels - diff --git a/.gitignore b/.gitignore index df7f084e3645..b54de4091bf3 100644 --- a/.gitignore +++ b/.gitignore @@ -43,28 +43,11 @@ GTAGS *.so *.mod -# Packages # -############ -# it's better to unpack these files and commit the raw source -# git has its own built in compression methods -*.7z -*.bz2 -*.bzip2 -*.dmg -*.gz -*.iso -*.jar -*.rar -*.tar -*.tbz2 -*.tgz -*.zip - # Python files # ################ # meson build/installation directories build -build-install +build-* # meson python output .mesonpy-native-file.ini # sphinx build directory diff --git a/.mailmap b/.mailmap index f33dfddb6492..18cfb272618f 100644 --- a/.mailmap +++ b/.mailmap @@ -11,6 +11,7 @@ !Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> !DWesl <22566757+DWesl@users.noreply.github.com> !Dreamge +!EarlMilktea <66886825+EarlMilktea@users.noreply.github.com> !Endolith !GalaxySnail !Illviljan <14371165+Illviljan@users.noreply.github.com> @@ -21,6 +22,7 @@ !Scian <65375075+hoony6134@users.noreply.github.com> !Searchingdays !amagicmuffin <2014wcheng@gmail.com> +!amotzop !bersbersbers <12128514+bersbersbers@users.noreply.github.com> !code-review-doctor !cook-1229 <70235336+cook-1229@users.noreply.github.com> @@ -34,6 +36,7 @@ !hutauf !jbCodeHub !juztamau5 +!karl3wm !legoffant <58195095+legoffant@users.noreply.github.com> !liang3zy22 <35164941+liang3zy22@users.noreply.github.com> !luzpaz @@ -51,6 +54,7 @@ !pmvz !pojaghi <36278217+pojaghi@users.noreply.github.com> !pratiklp00 +!samir539 !sfolje0 !spacescientist !stefan6419846 @@ -59,12 +63,15 @@ !tautaus !undermyumbrella1 !vahidmech +!wenlong2 !xoviat <49173759+xoviat@users.noreply.github.com> !xoviat <49173759+xoviat@users.noreply.github.com> !yan-wyb !yetanothercheer Aaron Baecker Adrin Jalali +Abhishek Kumar +Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> Abraham Medina Arun Kota Arun Kota Arun Kota @@ -140,6 +147,8 @@ Anton Prosekin Anže Starič Arfy Slowy Arnaud Ma +Arnaud Tremblay +Arnaud Tremblay <59627629+Msa360@users.noreply.github.com> Aron Ahmadia Arun Kota Arun Kota @@ -190,6 +199,8 @@ Carl Kleffner Carl Leake Carlos Henrique Hermanny Moreira da Silva Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> +Carlos Martin +Carlos Martin Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -200,7 +211,10 @@ Chris Burns Chris Fu (傅立业) <17433201@qq.com> Chris Holland <41524756+ChrisAHolland@users.noreply.github.com> Chris Kerr +Chris Navarro +Chris Navarro <24905907+lvllvl@users.noreply.github.com> Chris Vavaliaris +Christian Barbia Christian Clauss Christopher Dahlin Christopher Hanley @@ -270,6 +284,7 @@ Eric Fode Eric Fode Eric Quintero Eric Xie <161030123+EngineerEricXie@users.noreply.github.com> Ernest N. Mamikonyan +Ernst Peng Eskild Eriksen Eskild Eriksen <42120229+iameskild@users.noreply.github.com> Eskild Eriksen @@ -300,8 +315,11 @@ Gregory R. Lee Gregory R. 
Lee Guo Ci guoci Guo Shuai +Gyeongjae Choi Habiba Hye Habiba Hye <145866308+HabibiHye@users.noreply.github.com> +Halle Loveday +Halle Loveday Hameer Abbasi Hannah Aizenman Han Genuit @@ -311,6 +329,10 @@ Helder Oliveira Hemil Desai Himanshu Hiroyuki V. Yamazaki +Daniel Hrisca +Daniel Hrisca +François de Coatpont +François de Coatpont <93073405+Chevali2004@users.noreply.github.com> Hugo van Kemenade Iantra Solari I-Shen Leong @@ -363,6 +385,7 @@ Jessé Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> Jhong-Ken Chen (陳仲肯) Jhong-Ken Chen (陳仲肯) <37182101+kennychenfs@users.noreply.github.com> +Jiuding Tan (谭九鼎) <109224573@qq.com> Johann Faouzi Johann Rohwer Johann Rohwer jmrohwer @@ -447,10 +470,13 @@ Luke Zoltan Kelley Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> Magdalena Proszewska Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> +Makima C. Yang Malik Idrees Hasan Khan <77000356+MalikIdreesHasanKhan@users.noreply.github.com>C Manoj Kumar Marcel Loose Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> +Marco Edward Gorelli +Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Margret Pax Margret Pax <13646646+paxcodes@users.noreply.github.com> Mark DePristo @@ -508,6 +534,7 @@ Michael Schnaitter Michael Seifert Michel Fruchart +Mike O'Brien Mike Toews Miki Watanabe (渡邉 美希) Miles Cranmer @@ -516,9 +543,12 @@ Milica Dančuk love-bees <33499899+love-bees@users.noreply.github.com> Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Mohammed Abdul Rahman +Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> Mohaned Qunaibit Muhammad Kasim Muhammed Muhsin +Mugundan Selvanayagam Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> Munira Alduraibi @@ -571,6 +601,7 @@ Peter J Cock Peter Kämpf Peyton Murray Phil Elson +Filipe Laíns Pierre GM Pierre GM pierregm Piotr Gaiński @@ -591,6 +622,8 @@ Rehas Sachdeva Richard Howe <45905457+rmhowe425@users.noreply.github.com> Ritta Narita Riya Sharma +Rob Timpe +Rob Timpe Robert Kern Robert LU Robert T. 
McGibbon @@ -623,6 +656,7 @@ Saransh Chopra Saullo Giovani Saurabh Mehta Sayantika Banik +Sayed Awad Schrijvers Luc Sean Cheah Sean Cheah <67928790+thalassemia@users.noreply.github.com> @@ -660,6 +694,7 @@ Steve Stagg Steven J Kern Stuart Archibald Stuart Archibald +SUMIT SRIMANI <2301109104@ptuniv.edu.in> SuryaChand P Sylvain Ferriol Takanori Hirano @@ -696,6 +731,8 @@ Vinith Kishore <85550536+vinith2@users.noreply.github.com> Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> +Wang Yang (杨旺) +Wang Yang (杨旺) <1113177880@qq.com> Wansoo Kim Warrick Ball Warrick Ball @@ -711,11 +748,13 @@ Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau -Yang Wang Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> +Yasir Ashfaq +Yasir Ashfaq <107119183+yasiribmcon@users.noreply.github.com> +Yichi Zhang Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> Younes Sandi Younes Sandi <65843206+Unessam@users.noreply.github.com> @@ -723,6 +762,8 @@ Yu Feng Yuji Kanagawa Yuki K Yury Kirienko +Yuvraj Pradhan +Yuvraj Pradhan Zac Hatfield-Dodds Zach Brugh <111941670+zachbrugh@users.noreply.github.com> Zé Vinícius diff --git a/.spin/cmds.py b/.spin/cmds.py index e5ae29d4a6a2..5c4d5e90f6d7 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -141,11 +141,24 @@ def docs(*, parent_callback, **kwargs): default=default, help="Run tests with the given markers" ) +@click.option( + "-p", + "--parallel-threads", + metavar='PARALLEL_THREADS', + default="1", + help="Run each test on the given number of parallel threads under pytest-run-parallel." + " Can be set to `auto` to use all cores. Use `spin test -p <n> -- " + "--skip-thread-unsafe=true` to only run tests that can run in parallel. " + "pytest-run-parallel must be installed to use this option." +) @spin.util.extend_command(spin.cmds.meson.test) -def test(*, parent_callback, pytest_args, tests, markexpr, **kwargs): +def test(*, parent_callback, pytest_args, tests, markexpr, parallel_threads, **kwargs): """ By default, spin will run `-m 'not slow'`. To run the full test suite, use `spin test -m full` + + When pytest-run-parallel is available, use `spin test -p auto` or + `spin test -p <n>` to run tests sequentially in parallel threads. 
""" # noqa: E501 if (not pytest_args) and (not tests): pytest_args = ('--pyargs', 'numpy') @@ -154,6 +167,9 @@ def test(*, parent_callback, pytest_args, tests, markexpr, **kwargs): if markexpr != "full": pytest_args = ('-m', markexpr) + pytest_args + if parallel_threads != "1": + pytest_args = ('--parallel-threads', parallel_threads) + pytest_args + kwargs['pytest_args'] = pytest_args parent_callback(**{'pytest_args': pytest_args, 'tests': tests, **kwargs}) @@ -196,6 +212,8 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): import scipy_doctest # noqa: F401 except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e + if scipy_doctest.__version__ < '1.8.0': + raise ModuleNotFoundError("please update scipy-doctest to >= 1.8.0") if (not pytest_args): pytest_args = ('--pyargs', 'numpy') @@ -203,6 +221,7 @@ # turn doctesting on: doctest_args = ( '--doctest-modules', + '--doctest-only-doctests=true', '--doctest-collect=api' ) @@ -373,6 +392,16 @@ def lint(ctx, fix): '--quick', '-q', is_flag=True, default=False, help="Run each benchmark only once (timings won't be accurate)" ) +@click.option( + '--factor', '-f', default=1.05, + help="The factor above or below which a benchmark result is " + "considered reportable. This is passed on to the asv command." +) +@click.option( + '--cpu-affinity', default=None, multiple=False, + help="Set CPU affinity for running the benchmark, in format: 0 or 0,1,2 or 0-3." + " Default: not set" +) @click.argument( 'commits', metavar='', required=False, @@ -380,7 +409,8 @@ ) @meson.build_dir_option @click.pass_context -def bench(ctx, tests, compare, verbose, quick, commits, build_dir): +def bench(ctx, tests, compare, verbose, quick, factor, cpu_affinity, + commits, build_dir): """🏋 Run benchmarks. 
\b @@ -423,6 +453,9 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): if quick: bench_args = ['--quick'] + bench_args + if cpu_affinity: + bench_args += ['--cpu-affinity', cpu_affinity] + if not compare: # No comparison requested; we build and benchmark the current version @@ -435,7 +468,7 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): meson._set_pythonpath(build_dir) p = spin.util.run( - ['python', '-c', 'import numpy as np; print(np.__version__)'], + [sys.executable, '-c', 'import numpy as np; print(np.__version__)'], cwd='benchmarks', echo=False, output=False @@ -452,7 +485,7 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): ] + bench_args _run_asv(cmd) else: - # Ensure that we don't have uncommited changes + # Ensure that we don't have uncommitted changes commit_a, commit_b = [_commit_to_sha(c) for c in commits] if commit_b == 'HEAD' and _dirty_git_working_dir(): @@ -463,7 +496,7 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): ) cmd_compare = [ - 'asv', 'continuous', '--factor', '1.05', + 'asv', 'continuous', '--factor', str(factor), ] + bench_args + [commit_a, commit_b] _run_asv(cmd_compare) @@ -518,6 +551,44 @@ def mypy(ctx): ctx.forward(test) +@click.command() +@click.option( + '--concise', + is_flag=True, + default=False, + help="Concise output format", +) +@meson.build_dir_option +def stubtest(*, concise: bool, build_dir: str) -> None: + """🧐 Run stubtest on NumPy's .pyi stubs + + Requires mypy to be installed + """ + click.get_current_context().invoke(build) + meson._set_pythonpath(build_dir) + print(f"{build_dir = !r}") + + import sysconfig + purellib = sysconfig.get_paths()["purelib"] + print(f"{purellib = !r}") + + stubtest_dir = curdir.parent / 'tools' / 'stubtest' + mypy_config = stubtest_dir / 'mypy.ini' + allowlist = stubtest_dir / 'allowlist.txt' + + cmd = [ + 'stubtest', + '--ignore-disjoint-bases', + f'--mypy-config-file={mypy_config}', + f'--allowlist={allowlist}', + ] + if concise: + cmd.append('--concise') + cmd.append('numpy') + + spin.util.run(cmd) + + @click.command(context_settings={ 'ignore_unknown_options': True }) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 6e019983a0a2..0919790c65d1 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -7,8 +7,9 @@ Whether you're new to open source or experienced, your contributions help us grow. Pull requests (PRs) are always welcome, but making a PR is just the -start. Please respond to comments and requests for changes to help -move the process forward. Please follow our +start. Please respond to comments and requests for changes to help move the process forward. +Skip asking for an issue to be assigned to you on GitHub—send in your PR, explain what you did and ask for a review. It makes collaboration and support much easier. +Please follow our `Code of Conduct `__, which applies to all interactions, including issues and PRs. diff --git a/INSTALL.rst b/INSTALL.rst index 017e4de8c9d4..72caf98380b7 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -14,7 +14,7 @@ Prerequisites Building NumPy requires the following installed software: -1) Python__ 3.11.x or newer. +1) Python__ 3.12.x or newer. Please note that the Python development headers also need to be installed, e.g., on Debian/Ubuntu one needs to install both `python3` and @@ -82,7 +82,7 @@ Choosing compilers NumPy needs C and C++ compilers, and for development versions also needs Cython. 
A Fortran compiler isn't needed to build NumPy itself; the ``numpy.f2py`` tests will be skipped when running the test suite if no Fortran -compiler is available. +compiler is available. For more options including selecting compilers, setting custom compiler flags and controlling parallelism, see @@ -135,12 +135,8 @@ For best performance, a development package providing BLAS and CBLAS should be installed. Some of the options available are: - ``libblas-dev``: reference BLAS (not very optimized) -- ``libatlas-base-dev``: generic tuned ATLAS, it is recommended to tune it to - the available hardware, see /usr/share/doc/libatlas3-base/README.Debian for - instructions -- ``libopenblas-base``: fast and runtime detected so no tuning required but a - very recent version is needed (>=0.2.15 is recommended). Older versions of - OpenBLAS suffered from correctness issues on some CPUs. +- ``libopenblas-base``: (recommended) OpenBLAS is performant and is used + in the NumPy wheels on PyPI, except where Apple's Accelerate is better tuned for Apple hardware. The package linked to when numpy is loaded can be chosen after installation via the alternatives mechanism:: update-alternatives --config libblas.so.3 update-alternatives --config liblapack.so.3 -Or by preloading a specific BLAS library with:: - - LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... - Build issues ============ diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt deleted file mode 100644 index b3d8aa8bed06..000000000000 --- a/LICENSES_bundled.txt +++ /dev/null @@ -1,36 +0,0 @@ -The NumPy repository and source distributions bundle several libraries that are -compatibly licensed. We list these here. -Name: lapack-lite -Files: numpy/linalg/lapack_lite/* -License: BSD-3-Clause - For details, see numpy/linalg/lapack_lite/LICENSE.txt -Name: dragon4 -Files: numpy/_core/src/multiarray/dragon4.c -License: MIT - For license text, see numpy/_core/src/multiarray/dragon4.c -Name: libdivide -Files: numpy/_core/include/numpy/libdivide/* -License: Zlib - For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt - -Note that the following files are vendored in the repository and sdist but not -installed in built numpy packages: -Name: Meson -Files: vendored-meson/meson/* -License: Apache 2.0 - For license text, see vendored-meson/meson/COPYING -Name: spin -Files: .spin/cmds.py -License: BSD-3 - For license text, see .spin/LICENSE -Name: tempita -Files: numpy/_build_utils/tempita/* -License: MIT - For details, see numpy/_build_utils/tempita/LICENCE.txt diff --git a/README.md b/README.md index b2d3cffc8978..7bf1e13346ce 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ https://anaconda.org/conda-forge/numpy) https://stackoverflow.com/questions/tagged/numpy) [![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)]( https://doi.org/10.1038/s41586-020-2649-2) +[![LFX Health Score](https://insights.linuxfoundation.org/api/badge/health-score?project=numpy)](https://insights.linuxfoundation.org/project/numpy) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy) [![Typing](https://img.shields.io/pypi/types/numpy)](https://pypi.org/project/numpy/) diff --git a/azure-pipelines.yml b/azure-pipelines.yml deleted file mode 100644 index 36362f6cacc7..000000000000 --- a/azure-pipelines.yml +++ /dev/null @@ -1,102 +0,0 @@ -trigger: - # start 
a new build for every push - batch: False - branches: - include: - - main - - maintenance/* - - -pr: - branches: - include: - - '*' # must quote since "*" is a YAML reserved character; we want a string - - -stages: - -- stage: Check - jobs: - - job: Skip - pool: - vmImage: 'ubuntu-22.04' - variables: - DECODE_PERCENTS: 'false' - RET: 'true' - steps: - - bash: | - git_log=`git log --max-count=1 --skip=1 --pretty=format:"%B" | tr "\n" " "` - echo "##vso[task.setvariable variable=log]$git_log" - - bash: echo "##vso[task.setvariable variable=RET]false" - condition: or(contains(variables.log, '[skip azp]'), contains(variables.log, '[azp skip]'), contains(variables.log, '[skip ci]'), contains(variables.log, '[ci skip]')) - - bash: echo "##vso[task.setvariable variable=start_main;isOutput=true]$RET" - name: result - -- stage: ComprehensiveTests - condition: and(succeeded(), eq(dependencies.Check.outputs['Skip.result.start_main'], 'true')) - dependsOn: Check - jobs: - - - job: Lint - condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) - pool: - vmImage: 'ubuntu-22.04' - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.11' - addToPath: true - architecture: 'x64' - - script: >- - python -m pip install -r requirements/linter_requirements.txt - displayName: 'Install tools' - # pip 21.1 emits a pile of garbage messages to annoy users :) - # failOnStderr: true - - script: | - python tools/linter.py - displayName: 'Run Lint Checks' - failOnStderr: true - - - job: Linux_Python_311_32bit_full_with_asserts - pool: - vmImage: 'ubuntu-22.04' - steps: - - script: | - git submodule update --init - displayName: 'Fetch submodules' - - script: | - # There are few options for i686 images at https://quay.io/organization/pypa, - # use the glibc2.17 one (manylinux2014) - docker run -v $(pwd):/numpy -e CFLAGS="-msse2 -std=c99 -UNDEBUG" \ - -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2014_i686 \ - /bin/bash -xc "source /numpy/tools/ci/run_32_bit_linux_docker.sh" - displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' - - - job: Windows - timeoutInMinutes: 120 - pool: - vmImage: 'windows-2019' - strategy: - maxParallel: 3 - matrix: - Python311-64bit-fast: - PYTHON_VERSION: '3.11' - PYTHON_ARCH: 'x64' - TEST_MODE: fast - BITS: 64 - Python312-64bit-full: - PYTHON_VERSION: '3.12' - PYTHON_ARCH: 'x64' - TEST_MODE: full - BITS: 64 - _USE_BLAS_ILP64: '1' -# TODO pypy: uncomment when pypy3.11 comes out -# PyPy311-64bit-fast: -# PYTHON_VERSION: 'pypy3.11' -# PYTHON_ARCH: 'x64' -# TEST_MODE: fast -# BITS: 64 -# _USE_BLAS_ILP64: '1' - - steps: - - template: azure-steps-windows.yml diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml deleted file mode 100644 index 0baf374e1e3f..000000000000 --- a/azure-steps-windows.yml +++ /dev/null @@ -1,55 +0,0 @@ -steps: -- script: git submodule update --init - displayName: 'Fetch submodules' -- task: UsePythonVersion@0 - inputs: - versionSpec: $(PYTHON_VERSION) - addToPath: true - architecture: $(PYTHON_ARCH) - -- script: python -m pip install --upgrade pip wheel - displayName: 'Install tools' - -- script: python -m pip install -r requirements/test_requirements.txt - displayName: 'Install dependencies; some are optional to avoid test skips' - -- powershell: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - displayName: 'Install utilities' - -- powershell: | - # Note: ensure the `pip install .` command remains the last one here, - # to avoid "green on failure" issues - If ( 
Test-Path env:DISABLE_BLAS ) { - python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none" -Csetup-args="-Dallow-noblas=true" - } - elseif ( Test-Path env:_USE_BLAS_ILP64 ) { - pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=64 - $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . -v -Csetup-args="--vsenv" - } else { - pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=32 - $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . -v -Csetup-args="--vsenv" - } - displayName: 'Build NumPy' - -- powershell: | - cd tools # avoid root dir to not pick up source tree - # Get a gfortran onto the path for f2py tests - $env:PATH = "c:\\rtools43\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" - If ( $env:TEST_MODE -eq "full" ) { - pytest --pyargs numpy -rsx --junitxml=junit/test-results.xml - } else { - pytest --pyargs numpy -m "not slow" -rsx --junitxml=junit/test-results.xml - } - displayName: 'Run NumPy Test Suite' - -- task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows' diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 5e174704f105..ea7aae007fdc 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -14,6 +14,7 @@ def setup(self): self.l_view = [memoryview(a) for a in self.l] self.l10x10 = np.ones((10, 10)) self.float64_dtype = np.dtype(np.float64) + self.arr = np.arange(10000).reshape(100, 100) def time_array_1(self): np.array(1) @@ -48,6 +49,9 @@ def time_array_l_view(self): def time_can_cast(self): np.can_cast(self.l10x10, self.float64_dtype) + def time_tobytes_noncontiguous(self): + self.arr.T.tobytes() + def time_can_cast_same_kind(self): np.can_cast(self.l10x10, self.float64_dtype, casting="same_kind") @@ -151,7 +155,8 @@ class CountNonzero(Benchmark): params = [ [1, 2, 3], [100, 10000, 1000000], - [bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, str, object] + [bool, np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, str, object] ] def setup(self, numaxes, size, dtype): diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 57499dc761f8..f72d50eb74ce 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -236,12 +236,13 @@ class Sort(Benchmark): param_names = ['kind', 'dtype', 'array_type'] # The size of the benchmarked arrays. - ARRAY_SIZE = 10000 + ARRAY_SIZE = 1000000 def setup(self, kind, dtype, array_type): rnd = np.random.RandomState(507582308) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) + generate_array_method = getattr(SortGenerator, array_class) + self.arr = generate_array_method(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_sort(self, kind, dtype, array_type): # Using np.sort(...) instead of arr.sort(...) because it makes a copy. 
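The `time_tobytes_noncontiguous` benchmark added to `bench_core.py` above times `tobytes()` on a transposed view. As a rough illustration of what it exercises (a minimal sketch, not part of this patch; the shape matches the benchmark, the timing loop is arbitrary): a C-contiguous array can be serialized with one bulk copy, while the transposed view must gather elements.

```python
# Minimal sketch (not part of the patch): compare tobytes() on a
# C-contiguous array vs. its transposed, non-contiguous view.
import timeit

import numpy as np

arr = np.arange(10000).reshape(100, 100)  # same shape as the benchmark's setup

contiguous = timeit.timeit(arr.tobytes, number=1000)
noncontiguous = timeit.timeit(arr.T.tobytes, number=1000)

print(f"C-contiguous tobytes:   {contiguous:.4f} s")
print(f"non-contiguous tobytes: {noncontiguous:.4f} s")
```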
diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py index 6ac124cac88d..f1153489f515 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -84,6 +84,22 @@ def time_assign_cast(self, ndim): arr[indx] = val +class BooleanAssignmentOrder(Benchmark): + params = ['C', 'F'] + param_names = ['order'] + + def setup(self, order): + shape = (64, 64, 64) + # emulate gh-30156: boolean assignment into a Fortran/C array + self.base = np.zeros(shape, dtype=np.uint32, order=order) + mask = np.random.RandomState(0).rand(*self.base.shape) > 0.5 + self.mask = mask.copy(order) + self.value = np.uint32(7) + + def time_boolean_assign_scalar(self, order): + self.base[self.mask] = self.value + + class IndexingSeparate(Benchmark): def setup(self): self.tmp_dir = mkdtemp() @@ -134,6 +150,7 @@ def setup(self): self.m_half = np.copy(self.m_all) self.m_half[::2] = False self.m_none = np.repeat(False, 200 * 50000) + self.m_index_2d = np.arange(200 * 50000).reshape((100, 100000)) def time_flat_bool_index_none(self): self.a.flat[self.m_none] @@ -143,3 +160,21 @@ def time_flat_bool_index_half(self): def time_flat_bool_index_all(self): self.a.flat[self.m_all] + + def time_flat_fancy_index_2d(self): + self.a.flat[self.m_index_2d] + + def time_flat_empty_tuple_index(self): + self.a.flat[()] + + def time_flat_ellipsis_index(self): + self.a.flat[...] + + def time_flat_bool_index_0d(self): + self.a.flat[True] + + def time_flat_int_index(self): + self.a.flat[1_000_000] + + def time_flat_slice_index(self): + self.a.flat[1_000_000:2_000_000] diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 0e60468308bb..11d454ae41bf 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -1,5 +1,8 @@ """Benchmarks for `numpy.lib`.""" +import string + +from asv_runner.benchmarks.mark import SkipNotImplemented import numpy as np @@ -119,37 +122,116 @@ def time_nanpercentile(self, array_size, percent_nans): class Unique(Benchmark): """Benchmark for np.unique with np.nan values.""" - param_names = ["array_size", "percent_nans"] + param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays [200, int(2e5)], # percent of np.nan in arrays - [0, 0.1, 2., 50., 90.], + [0.0, 10., 90.], + # percent of unique values in arrays + [0.2, 20.], + # dtypes of the arrays + [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] - def setup(self, array_size, percent_nans): - np.random.seed(123) + def setup(self, array_size, percent_nans, percent_unique_values, dtype): + rng = np.random.default_rng(123) # produce a randomly shuffled array with the # approximate desired percentage np.nan content - base_array = np.random.uniform(size=array_size) - n_nan = int(percent_nans * array_size) - nan_indices = np.random.choice(np.arange(array_size), size=n_nan) + unique_values_size = max(int(percent_unique_values / 100. 
* array_size), 2) + match dtype: + case np.float64: + unique_array = rng.uniform(size=unique_values_size).astype(dtype) + case np.complex128: + unique_array = np.array( + [ + complex(*rng.uniform(size=2)) + for _ in range(unique_values_size) + ], + dtype=dtype, + ) + case np.dtypes.StringDType(): + chars = string.ascii_letters + string.digits + unique_array = np.array( + [ + ''.join(rng.choice(list(chars), size=rng.integers(4, 8))) + for _ in range(unique_values_size) + ], + dtype=dtype, + ) + case _: + raise ValueError(f"Unsupported dtype {dtype}") + + base_array = np.resize(unique_array, array_size) + rng.shuffle(base_array) + # insert nans in random places + n_nan = int(percent_nans / 100. * array_size) + nan_indices = rng.choice(np.arange(array_size), size=n_nan, replace=False) base_array[nan_indices] = np.nan self.arr = base_array - def time_unique_values(self, array_size, percent_nans): + def time_unique_values(self, array_size, percent_nans, + percent_unique_values, dtype): + np.unique(self.arr, return_index=False, + return_inverse=False, return_counts=False) + + def time_unique_counts(self, array_size, percent_nans, + percent_unique_values, dtype): + np.unique(self.arr, return_index=False, + return_inverse=False, return_counts=True,) + + def time_unique_inverse(self, array_size, percent_nans, + percent_unique_values, dtype): + np.unique(self.arr, return_index=False, + return_inverse=True, return_counts=False) + + def time_unique_all(self, array_size, percent_nans, + percent_unique_values, dtype): + np.unique(self.arr, return_index=True, + return_inverse=True, return_counts=True) + + +class UniqueIntegers(Benchmark): + """Benchmark for np.unique with integer dtypes.""" + + param_names = ["array_size", "num_unique_values", "dtype"] + params = [ + # sizes of the 1D arrays + [200, 100000, 1000000], + # number of unique values in arrays + [25, 125, 5000, 50000, 250000], + # dtypes of the arrays + [np.uint8, np.int16, np.uint32, np.int64], + ] + + def setup(self, array_size, num_unique_values, dtype): + unique_array = np.arange(num_unique_values, dtype=dtype) + base_array = np.resize(unique_array, array_size) + rng = np.random.default_rng(121263137472525314065) + rng.shuffle(base_array) + self.arr = base_array + + def time_unique_values(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") np.unique(self.arr, return_index=False, return_inverse=False, return_counts=False) - def time_unique_counts(self, array_size, percent_nans): + def time_unique_counts(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") np.unique(self.arr, return_index=False, - return_inverse=False, return_counts=True) + return_inverse=False, return_counts=True,) - def time_unique_inverse(self, array_size, percent_nans): + def time_unique_inverse(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") np.unique(self.arr, return_index=False, return_inverse=True, return_counts=False) - def time_unique_all(self, array_size, percent_nans): + def time_unique_all(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") np.unique(self.arr, return_index=True, return_inverse=True, return_counts=True) 
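
An aside on the ``time_unique_*`` variants above: each maps onto one of
``np.unique``'s optional return flags, and each flag combination exercises a
different code path, hence the four separate timed methods. A toy illustration of
what each timed call computes (input values chosen arbitrarily)::

    import numpy as np

    arr = np.array([3, 1, 3, 2, 1, 3])

    values = np.unique(arr)                              # [1, 2, 3]
    values, counts = np.unique(arr, return_counts=True)  # counts: [2, 1, 3]

    # inverse maps each element of arr back into values, so
    # values[inverse] reconstructs the original array.
    values, inverse = np.unique(arr, return_inverse=True)
    assert np.array_equal(values[inverse], arr)

    # index gives the first-occurrence position of each unique value.
    values, index = np.unique(arr, return_index=True)
    assert np.array_equal(arr[index], values)

Raising ``SkipNotImplemented`` (imported above from ``asv_runner``) makes asv
record a parameter combination as skipped rather than failed, which is how the
integer benchmarks avoid impossible size/dtype combinations.
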
diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 03e2fd77f4f2..49a7ae84fde6 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -148,7 +148,9 @@ def setup(self, dtype): self.non_contiguous_dim1_small = np.arange(1, 80, 2, dtype=dtype) self.non_contiguous_dim1 = np.arange(1, 4000, 2, dtype=dtype) self.non_contiguous_dim2 = np.arange(1, 2400, 2, dtype=dtype).reshape(30, 40) - self.non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype).reshape(20, 30, 40) + + non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype) + self.non_contiguous_dim3 = non_contiguous_dim3.reshape(20, 30, 40) # outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two def time_einsum_outer(self, dtype): @@ -180,11 +182,13 @@ def time_einsum_contig_outstride0(self, dtype): # outer(a,b): non_contiguous arrays def time_einsum_noncon_outer(self, dtype): - np.einsum("i,j", self.non_contiguous_dim1, self.non_contiguous_dim1, optimize=True) + np.einsum("i,j", self.non_contiguous_dim1, + self.non_contiguous_dim1, optimize=True) # multiply(a, b):non_contiguous arrays def time_einsum_noncon_multiply(self, dtype): - np.einsum("..., ...", self.non_contiguous_dim2, self.non_contiguous_dim3, optimize=True) + np.einsum("..., ...", self.non_contiguous_dim2, + self.non_contiguous_dim3, optimize=True) # sum and multiply:non_contiguous arrays def time_einsum_noncon_sum_mul(self, dtype): @@ -200,7 +204,8 @@ def time_einsum_noncon_mul(self, dtype): # contig_contig_outstride0_two: non_contiguous arrays def time_einsum_noncon_contig_contig(self, dtype): - np.einsum("ji,i->", self.non_contiguous_dim2, self.non_contiguous_dim1_small, optimize=True) + np.einsum("ji,i->", self.non_contiguous_dim2, + self.non_contiguous_dim1_small, optimize=True) # sum_of_products_contig_outstride0_one: non_contiguous arrays def time_einsum_noncon_contig_outstride0(self, dtype): diff --git a/benchmarks/benchmarks/bench_ndindex.py b/benchmarks/benchmarks/bench_ndindex.py new file mode 100644 index 000000000000..132d4eeed472 --- /dev/null +++ b/benchmarks/benchmarks/bench_ndindex.py @@ -0,0 +1,54 @@ +from itertools import product + +import numpy as np + +from .common import Benchmark + + +class NdindexBenchmark(Benchmark): + """ + Benchmark comparing numpy.ndindex() and itertools.product() + for different multi-dimensional shapes. + """ + + # Fix: Define each dimension separately, not as tuples + # ASV will pass each parameter list element to setup() + params = [ + [(10, 10), (20, 20), (50, 50), (10, 10, 10), (20, 30, 40), (50, 60, 90)] + ] + param_names = ["shape"] + + def setup(self, shape): + """Setup method called before each benchmark run.""" + # Access ndindex through NumPy's main namespace + self.ndindex = np.ndindex + + def time_ndindex(self, shape): + """ + Measure time taken by np.ndindex. + It creates an iterator that goes over each index. + """ + for _ in self.ndindex(*shape): + pass # Just loop through, no work inside + + def time_itertools_product(self, shape): + """ + Measure time taken by itertools.product. + Same goal: iterate over all index positions. + """ + for _ in product(*(range(s) for s in shape)): + pass + + def peakmem_ndindex(self, shape): + """ + Measure peak memory used when fully consuming + np.ndindex iterator by converting it to a list. 
+ """ + return list(self.ndindex(*shape)) + + def peakmem_itertools_product(self, shape): + """ + Measure peak memory used when fully consuming + itertools.product iterator by converting it to a list. + """ + return list(product(*(range(s) for s in shape))) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 7dc321ac2980..ac978981faba 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -53,7 +53,7 @@ def setup(self, ufuncname): except AttributeError: raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): + for aarg in get_squares_().values(): arg = (aarg,) * 1 # no nin try: self.afdn(*arg) @@ -100,7 +100,7 @@ def setup(self, ufuncname): except AttributeError: raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): + for aarg in get_squares_().values(): arg = (aarg,) * self.ufn.nin try: self.ufn(*arg) @@ -304,7 +304,7 @@ def time_ndarray_dlp(self, methname, npdtypes): class NDArrayAsType(Benchmark): """ Benchmark for type conversion """ - params = [list(itertools.combinations(TYPES1, 2))] + params = [list(itertools.product(TYPES1, TYPES1))] param_names = ['typeconv'] timeout = 10 diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index b86be87f9e68..0c80b1877b3a 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -10,7 +10,7 @@ class _AbstractBinary(Benchmark): params = [] param_names = ['ufunc', 'stride_in0', 'stride_in1', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False @@ -63,7 +63,7 @@ class _AbstractUnary(Benchmark): params = [] param_names = ['ufunc', 'stride_in', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False @@ -208,8 +208,9 @@ def train(self, max_epoch): for epoch in range(max_epoch): z = np.matmul(self.X_train, self.W) A = 1 / (1 + np.exp(-z)) # sigmoid(z) - loss = -np.mean(self.Y_train * np.log(A) + (1 - self.Y_train) * np.log(1 - A)) - dz = A - self.Y_train + Y_train = self.Y_train + loss = -np.mean(Y_train * np.log(A) + (1 - Y_train) * np.log(1 - A)) + dz = A - Y_train dw = (1 / self.size) * np.matmul(self.X_train.T, dz) self.W = self.W - self.alpha * dw diff --git a/doc/BRANCH_WALKTHROUGH.rst b/doc/BRANCH_WALKTHROUGH.rst index 5767fb6e6a10..3f9db71a0282 100644 --- a/doc/BRANCH_WALKTHROUGH.rst +++ b/doc/BRANCH_WALKTHROUGH.rst @@ -1,6 +1,6 @@ -This guide contains a walkthrough of branching NumPy 1.21.x on Linux. The -commands can be copied into the command line, but be sure to replace 1.21 and -1.22 by the correct versions. It is good practice to make ``.mailmap`` as +This guide contains a walkthrough of branching NumPy 2.3.x on Linux. The +commands can be copied into the command line, but be sure to replace 2.3 and +2.4 by the correct versions. It is good practice to make ``.mailmap`` as current as possible before making the branch, that may take several weeks. This should be read together with the @@ -12,14 +12,13 @@ Branching Make the branch --------------- -This is only needed when starting a new maintenance branch. Because -NumPy now depends on tags to determine the version, the start of a new -development cycle in the main branch needs an annotated tag. That is done +This is only needed when starting a new maintenance branch. 
The start of a new +development cycle in the main branch should get an annotated tag. That is done as follows:: $ git checkout main $ git pull upstream main - $ git commit --allow-empty -m'REL: Begin NumPy 1.22.0 development' + $ git commit --allow-empty -m'REL: Begin NumPy 2.4.0 development' $ git push upstream HEAD If the push fails because new PRs have been merged, do:: @@ -28,20 +27,20 @@ If the push fails because new PRs have been merged, do:: and repeat the push. Once the push succeeds, tag it:: - $ git tag -a -s v1.22.0.dev0 -m'Begin NumPy 1.22.0 development' - $ git push upstream v1.22.0.dev0 + $ git tag -a -s v2.4.0.dev0 -m'Begin NumPy 2.4.0 development' + $ git push upstream v2.4.0.dev0 then make the new branch and push it:: - $ git branch maintenance/1.21.x HEAD^ - $ git push upstream maintenance/1.21.x + $ git branch maintenance/2.3.x HEAD^ + $ git push upstream maintenance/2.3.x Prepare the main branch for further development ----------------------------------------------- -Make a PR branch to prepare main for further development:: +Make a PR branch to prepare ``main`` for further development:: - $ git checkout -b 'prepare-main-for-1.22.0-development' v1.22.0.dev0 + $ git checkout -b 'prepare-main-for-2.4.0-development' v2.4.0.dev0 Delete the release note fragments:: @@ -49,18 +48,12 @@ Delete the release note fragments:: Create the new release notes skeleton and add to index:: - $ cp doc/source/release/template.rst doc/source/release/1.22.0-notes.rst - $ gvim doc/source/release/1.22.0-notes.rst # put the correct version - $ git add doc/source/release/1.22.0-notes.rst + $ cp doc/source/release/template.rst doc/source/release/2.4.0-notes.rst + $ gvim doc/source/release/2.4.0-notes.rst # put the correct version + $ git add doc/source/release/2.4.0-notes.rst $ gvim doc/source/release.rst # add new notes to notes index $ git add doc/source/release.rst -Update ``pavement.py`` and update the ``RELEASE_NOTES`` variable to point to -the new notes:: - - $ gvim pavement.py - $ git add pavement.py - Update ``cversions.txt`` to add current release. There should be no new hash to worry about at this early point, just add a comment following previous practice:: @@ -71,7 +64,7 @@ practice:: Check your work, commit it, and push:: $ git status # check work - $ git commit -m'REL: Prepare main for NumPy 1.22.0 development' + $ git commit -m'REL: Prepare main for NumPy 2.4.0 development' $ git push origin HEAD Now make a pull request. diff --git a/doc/DISTUTILS.rst b/doc/DISTUTILS.rst deleted file mode 100644 index 142c15a7124a..000000000000 --- a/doc/DISTUTILS.rst +++ /dev/null @@ -1,622 +0,0 @@ -.. -*- rest -*- - -NumPy distutils - users guide -============================= - -.. contents:: - -SciPy structure -''''''''''''''' - -Currently SciPy project consists of two packages: - -- NumPy --- it provides packages like: - - + numpy.distutils - extension to Python distutils - + numpy.f2py - a tool to bind Fortran/C codes to Python - + numpy._core - future replacement of Numeric and numarray packages - + numpy.lib - extra utility functions - + numpy.testing - numpy-style tools for unit testing - + etc - -- SciPy --- a collection of scientific tools for Python. - -The aim of this document is to describe how to add new tools to SciPy. - - -Requirements for SciPy packages -''''''''''''''''''''''''''''''' - -SciPy consists of Python packages, called SciPy packages, that are -available to Python users via the ``scipy`` namespace. Each SciPy package -may contain other SciPy packages. And so on. 
Therefore, the SciPy -directory tree is a tree of packages with arbitrary depth and width. -Any SciPy package may depend on NumPy packages but the dependence on other -SciPy packages should be kept minimal or zero. - -A SciPy package contains, in addition to its sources, the following -files and directories: - -+ ``setup.py`` --- building script -+ ``__init__.py`` --- package initializer -+ ``tests/`` --- directory of unittests - -Their contents are described below. - -The ``setup.py`` file -''''''''''''''''''''' - -In order to add a Python package to SciPy, its build script (``setup.py``) -must meet certain requirements. The most important requirement is that the -package define a ``configuration(parent_package='',top_path=None)`` function -which returns a dictionary suitable for passing to -``numpy.distutils.core.setup(..)``. To simplify the construction of -this dictionary, ``numpy.distutils.misc_util`` provides the -``Configuration`` class, described below. - -SciPy pure Python package example ---------------------------------- - -Below is an example of a minimal ``setup.py`` file for a pure SciPy package:: - - #!/usr/bin/env python3 - def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('mypackage',parent_package,top_path) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - #setup(**configuration(top_path='').todict()) - setup(configuration=configuration) - -The arguments of the ``configuration`` function specify the name of -parent SciPy package (``parent_package``) and the directory location -of the main ``setup.py`` script (``top_path``). These arguments, -along with the name of the current package, should be passed to the -``Configuration`` constructor. - -The ``Configuration`` constructor has a fourth optional argument, -``package_path``, that can be used when package files are located in -a different location than the directory of the ``setup.py`` file. - -Remaining ``Configuration`` arguments are all keyword arguments that will -be used to initialize attributes of ``Configuration`` -instance. Usually, these keywords are the same as the ones that -``setup(..)`` function would expect, for example, ``packages``, -``ext_modules``, ``data_files``, ``include_dirs``, ``libraries``, -``headers``, ``scripts``, ``package_dir``, etc. However, the direct -specification of these keywords is not recommended as the content of -these keyword arguments will not be processed or checked for the -consistency of SciPy building system. - -Finally, ``Configuration`` has ``.todict()`` method that returns all -the configuration data as a dictionary suitable for passing on to the -``setup(..)`` function. - -``Configuration`` instance attributes -------------------------------------- - -In addition to attributes that can be specified via keyword arguments -to ``Configuration`` constructor, ``Configuration`` instance (let us -denote as ``config``) has the following attributes that can be useful -in writing setup scripts: - -+ ``config.name`` - full name of the current package. The names of parent - packages can be extracted as ``config.name.split('.')``. - -+ ``config.local_path`` - path to the location of current ``setup.py`` file. - -+ ``config.top_path`` - path to the location of main ``setup.py`` file. 
-
-``Configuration`` instance methods
-----------------------------------
-
-+ ``config.todict()`` --- returns configuration dictionary suitable for
-  passing to ``numpy.distutils.core.setup(..)`` function.
-
-+ ``config.paths(*paths)`` --- applies ``glob.glob(..)`` to items of
-  ``paths`` if necessary. Fixes ``paths`` item that is relative to
-  ``config.local_path``.
-
-+ ``config.get_subpackage(subpackage_name,subpackage_path=None)`` ---
-  returns a list of subpackage configurations. Subpackage is looked in the
-  current directory under the name ``subpackage_name`` but the path
-  can be specified also via optional ``subpackage_path`` argument.
-  If ``subpackage_name`` is specified as ``None`` then the subpackage
-  name will be taken the basename of ``subpackage_path``.
-  Any ``*`` used for subpackage names are expanded as wildcards.
-
-+ ``config.add_subpackage(subpackage_name,subpackage_path=None)`` ---
-  add SciPy subpackage configuration to the current one. The meaning
-  and usage of arguments is explained above, see
-  ``config.get_subpackage()`` method.
-
-+ ``config.add_data_files(*files)`` --- prepend ``files`` to ``data_files``
-  list. If ``files`` item is a tuple then its first element defines
-  the suffix of where data files are copied relative to package installation
-  directory and the second element specifies the path to data
-  files. By default data files are copied under package installation
-  directory. For example,
-
-  ::
-
-    config.add_data_files('foo.dat',
-                          ('fun',['gun.dat','nun/pun.dat','/tmp/sun.dat']),
-                          'bar/car.dat'.
-                          '/full/path/to/can.dat',
-                          )
-
-  will install data files to the following locations
-
-  ::
-
-    /
-      foo.dat
-      fun/
-        gun.dat
-        pun.dat
-        sun.dat
-      bar/
-        car.dat
-      can.dat
-
-  Path to data files can be a function taking no arguments and
-  returning path(s) to data files -- this is a useful when data files
-  are generated while building the package. (XXX: explain the step
-  when this function are called exactly)
-
-+ ``config.add_data_dir(data_path)`` --- add directory ``data_path``
-  recursively to ``data_files``. The whole directory tree starting at
-  ``data_path`` will be copied under package installation directory.
-  If ``data_path`` is a tuple then its first element defines
-  the suffix of where data files are copied relative to package installation
-  directory and the second element specifies the path to data directory.
-  By default, data directory are copied under package installation
-  directory under the basename of ``data_path``. For example,
-
-  ::
-
-    config.add_data_dir('fun')  # fun/ contains foo.dat bar/car.dat
-    config.add_data_dir(('sun','fun'))
-    config.add_data_dir(('gun','/full/path/to/fun'))
-
-  will install data files to the following locations
-
-  ::
-
-    /
-      fun/
-        foo.dat
-        bar/
-          car.dat
-      sun/
-        foo.dat
-        bar/
-          car.dat
-      gun/
-        foo.dat
-        bar/
-          car.dat
-
-+ ``config.add_include_dirs(*paths)`` --- prepend ``paths`` to
-  ``include_dirs`` list. This list will be visible to all extension
-  modules of the current package.
-
-+ ``config.add_headers(*files)`` --- prepend ``files`` to ``headers``
-  list. By default, headers will be installed under
-  ``<prefix>/include/pythonX.X/<config.name.replace('.','/')>/``
-  directory. If ``files`` item is a tuple then it's first argument
-  specifies the installation suffix relative to
-  ``<prefix>/include/pythonX.X/`` path. This is a Python distutils
-  method; its use is discouraged for NumPy and SciPy in favour of
-  ``config.add_data_files(*files)``.
-
-+ ``config.add_scripts(*files)`` --- prepend ``files`` to ``scripts``
-  list. Scripts will be installed under ``<prefix>/bin/`` directory.
-
-+ ``config.add_extension(name,sources,**kw)`` --- create and add an
-  ``Extension`` instance to ``ext_modules`` list. The first argument
-  ``name`` defines the name of the extension module that will be
-  installed under ``config.name`` package. The second argument is
-  a list of sources. ``add_extension`` method takes also keyword
-  arguments that are passed on to the ``Extension`` constructor.
-  The list of allowed keywords is the following: ``include_dirs``,
-  ``define_macros``, ``undef_macros``, ``library_dirs``, ``libraries``,
-  ``runtime_library_dirs``, ``extra_objects``, ``extra_compile_args``,
-  ``extra_link_args``, ``export_symbols``, ``swig_opts``, ``depends``,
-  ``language``, ``f2py_options``, ``module_dirs``, ``extra_info``,
-  ``extra_f77_compile_args``, ``extra_f90_compile_args``.
-
-  Note that ``config.paths`` method is applied to all lists that
-  may contain paths. ``extra_info`` is a dictionary or a list
-  of dictionaries that content will be appended to keyword arguments.
-  The list ``depends`` contains paths to files or directories
-  that the sources of the extension module depend on. If any path
-  in the ``depends`` list is newer than the extension module, then
-  the module will be rebuilt.
-
-  The list of sources may contain functions ('source generators')
-  with a pattern ``def <funcname>(ext, build_dir): return
-  <source(s) or None>``. If ``funcname`` returns ``None``, no sources
-  are generated. And if the ``Extension`` instance has no sources
-  after processing all source generators, no extension module will
-  be built. This is the recommended way to conditionally define
-  extension modules. Source generator functions are called by the
-  ``build_src`` sub-command of ``numpy.distutils``.
-
-  For example, here is a typical source generator function::
-
-    def generate_source(ext,build_dir):
-        import os
-        from distutils.dep_util import newer
-        target = os.path.join(build_dir,'somesource.c')
-        if newer(target,__file__):
-            # create target file
-        return target
-
-  The first argument contains the Extension instance that can be
-  useful to access its attributes like ``depends``, ``sources``,
-  etc. lists and modify them during the building process.
-  The second argument gives a path to a build directory that must
-  be used when creating files to a disk.
-
-+ ``config.add_library(name, sources, **build_info)`` --- add a
-  library to ``libraries`` list. Allowed keywords arguments are
-  ``depends``, ``macros``, ``include_dirs``, ``extra_compiler_args``,
-  ``f2py_options``, ``extra_f77_compile_args``,
-  ``extra_f90_compile_args``. See ``.add_extension()`` method for
-  more information on arguments.
-
-+ ``config.have_f77c()`` --- return True if Fortran 77 compiler is
-  available (read: a simple Fortran 77 code compiled successfully).
-
-+ ``config.have_f90c()`` --- return True if Fortran 90 compiler is
-  available (read: a simple Fortran 90 code compiled successfully).
-
-+ ``config.get_version()`` --- return version string of the current package,
-  ``None`` if version information could not be detected. This methods
-  scans files ``__version__.py``, ``<packagename>_version.py``,
-  ``version.py``, ``__svn_version__.py`` for string variables
-  ``version``, ``__version__``, ``_version``.
-
-+ ``config.make_svn_version_py()`` --- appends a data function to
-  ``data_files`` list that will generate ``__svn_version__.py`` file
-  to the current package directory. The file will be removed from
-  the source directory when Python exits.
-
-+ ``config.get_build_temp_dir()`` --- return a path to a temporary
-  directory. This is the place where one should build temporary
-  files.
-
-+ ``config.get_distribution()`` --- return distutils ``Distribution``
-  instance.
-
-+ ``config.get_config_cmd()`` --- returns ``numpy.distutils`` config
-  command instance.
-
-+ ``config.get_info(*names)`` ---
-
-
-.. _templating:
-
-Conversion of ``.src`` files using templates
---------------------------------------------
-
-NumPy distutils supports automatic conversion of source files named
-<somefile>.src. This facility can be used to maintain very similar
-code blocks requiring only simple changes between blocks. During the
-build phase of setup, if a template file named <somefile>.src is
-encountered, a new file named <somefile> is constructed from the
-template and placed in the build directory to be used instead. Two
-forms of template conversion are supported. The first form occurs for
-files named <somefile>.ext.src where ext is a recognized Fortran
-extension (f, f90, f95, f77, for, ftn, pyf). The second form is used
-for all other cases.
-
-.. index::
-   single: code generation
-
-Fortran files
--------------
-
-This template converter will replicate all **function** and
-**subroutine** blocks in the file with names that contain '<...>'
-according to the rules in '<...>'. The number of comma-separated words
-in '<...>' determines the number of times the block is repeated. What
-these words are indicates what that repeat rule, '<...>', should be
-replaced with in each block. All of the repeat rules in a block must
-contain the same number of comma-separated words indicating the number
-of times that block should be repeated. If the word in the repeat rule
-needs a comma, leftarrow, or rightarrow, then prepend it with a
-backslash ' \'. If a word in the repeat rule matches ' \\<index>' then
-it will be replaced with the <index>-th word in the same repeat
-specification. There are two forms for the repeat rule: named and
-short.
-
-Named repeat rule
-^^^^^^^^^^^^^^^^^
-
-A named repeat rule is useful when the same set of repeats must be
-used several times in a block. It is specified using
-<rule1=item1, item2, item3, ..., itemN>, where N is the number of times the block
-should be repeated. On each repeat of the block, the entire
-expression, '<...>' will be replaced first with item1, and then with
-item2, and so forth until N repeats are accomplished. Once a named
-repeat specification has been introduced, the same repeat rule may be
-used **in the current block** by referring only to the name
-(i.e. <rule1>).
-
-
-Short repeat rule
-^^^^^^^^^^^^^^^^^
-
-A short repeat rule looks like <item1, item2, item3, ..., itemN>. The
-rule specifies that the entire expression, '<...>' should be replaced
-first with item1, and then with item2, and so forth until N repeats
-are accomplished.
-
-
-Pre-defined names
-^^^^^^^^^^^^^^^^^
-
-The following predefined named repeat rules are available:
-
-- <prefix=s,d,c,z>
-
-- <_c=s,d,c,z>
-
-- <_t=real, double precision, complex, double complex>
-
-- <ftype=real, double precision, complex, double complex>
-
-- <ctype=float, double, complex_float, complex_double>
-
-- <ftypereal=real, double precision, \\0, \\1>
-
-- <ctypereal=float, double, \\0, \\1>
-
-
-Other files
-------------
-
-Non-Fortran files use a separate syntax for defining template blocks
-that should be repeated using a variable expansion similar to the
-named repeat rules of the Fortran-specific repeats.
-
-NumPy Distutils preprocesses C source files (extension: :file:`.c.src`) written
-in a custom templating language to generate C code. The ``@`` symbol is
-used to wrap macro-style variables to empower a string substitution mechanism
-that might describe (for instance) a set of data types.
- -The template language blocks are delimited by ``/**begin repeat`` -and ``/**end repeat**/`` lines, which may also be nested using -consecutively numbered delimiting lines such as ``/**begin repeat1`` -and ``/**end repeat1**/``: - -1. ``/**begin repeat`` on a line by itself marks the beginning of - a segment that should be repeated. - -2. Named variable expansions are defined using ``#name=item1, item2, item3, - ..., itemN#`` and placed on successive lines. These variables are - replaced in each repeat block with corresponding word. All named - variables in the same repeat block must define the same number of - words. - -3. In specifying the repeat rule for a named variable, ``item*N`` is short- - hand for ``item, item, ..., item`` repeated N times. In addition, - parenthesis in combination with ``*N`` can be used for grouping several - items that should be repeated. Thus, ``#name=(item1, item2)*4#`` is - equivalent to ``#name=item1, item2, item1, item2, item1, item2, item1, - item2#``. - -4. ``*/`` on a line by itself marks the end of the variable expansion - naming. The next line is the first line that will be repeated using - the named rules. - -5. Inside the block to be repeated, the variables that should be expanded - are specified as ``@name@``. - -6. ``/**end repeat**/`` on a line by itself marks the previous line - as the last line of the block to be repeated. - -7. A loop in the NumPy C source code may have a ``@TYPE@`` variable, targeted - for string substitution, which is preprocessed to a number of otherwise - identical loops with several strings such as ``INT``, ``LONG``, ``UINT``, - ``ULONG``. The ``@TYPE@`` style syntax thus reduces code duplication and - maintenance burden by mimicking languages that have generic type support. - -The above rules may be clearer in the following template source example: - -.. code-block:: NumPyC - :linenos: - :emphasize-lines: 3, 13, 29, 31 - - /* TIMEDELTA to non-float types */ - - /**begin repeat - * - * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, DATETIME, - * TIMEDELTA# - * #totype = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, - * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_datetime, npy_timedelta# - */ - - /**begin repeat1 - * - * #FROMTYPE = TIMEDELTA# - * #fromtype = npy_timedelta# - */ - static void - @FROMTYPE@_to_@TOTYPE@(void *input, void *output, npy_intp n, - void *NPY_UNUSED(aip), void *NPY_UNUSED(aop)) - { - const @fromtype@ *ip = input; - @totype@ *op = output; - - while (n--) { - *op++ = (@totype@)*ip++; - } - } - /**end repeat1**/ - - /**end repeat**/ - -The preprocessing of generically-typed C source files (whether in NumPy -proper or in any third party package using NumPy Distutils) is performed -by `conv_template.py`_. -The type-specific C files generated (extension: ``.c``) -by these modules during the build process are ready to be compiled. This -form of generic typing is also supported for C header files (preprocessed -to produce ``.h`` files). - -.. _conv_template.py: https://github.com/numpy/numpy/blob/main/numpy/distutils/conv_template.py - -Useful functions in ``numpy.distutils.misc_util`` -------------------------------------------------- - -+ ``get_numpy_include_dirs()`` --- return a list of NumPy base - include directories. NumPy base include directories contain - header files such as ``numpy/arrayobject.h``, ``numpy/funcobject.h`` - etc. 
For installed NumPy the returned list has length 1 - but when building NumPy the list may contain more directories, - for example, a path to ``config.h`` file that - ``numpy/base/setup.py`` file generates and is used by ``numpy`` - header files. - -+ ``append_path(prefix,path)`` --- smart append ``path`` to ``prefix``. - -+ ``gpaths(paths, local_path='')`` --- apply glob to paths and prepend - ``local_path`` if needed. - -+ ``njoin(*path)`` --- join pathname components + convert ``/``-separated path - to ``os.sep``-separated path and resolve ``..``, ``.`` from paths. - Ex. ``njoin('a',['b','./c'],'..','g') -> os.path.join('a','b','g')``. - -+ ``minrelpath(path)`` --- resolves dots in ``path``. - -+ ``rel_path(path, parent_path)`` --- return ``path`` relative to ``parent_path``. - -+ ``def get_cmd(cmdname,_cache={})`` --- returns ``numpy.distutils`` - command instance. - -+ ``all_strings(lst)`` - -+ ``has_f_sources(sources)`` - -+ ``has_cxx_sources(sources)`` - -+ ``filter_sources(sources)`` --- return ``c_sources, cxx_sources, - f_sources, fmodule_sources`` - -+ ``get_dependencies(sources)`` - -+ ``is_local_src_dir(directory)`` - -+ ``get_ext_source_files(ext)`` - -+ ``get_script_files(scripts)`` - -+ ``get_lib_source_files(lib)`` - -+ ``get_data_files(data)`` - -+ ``dot_join(*args)`` --- join non-zero arguments with a dot. - -+ ``get_frame(level=0)`` --- return frame object from call stack with given level. - -+ ``cyg2win32(path)`` - -+ ``mingw32()`` --- return ``True`` when using mingw32 environment. - -+ ``terminal_has_colors()``, ``red_text(s)``, ``green_text(s)``, - ``yellow_text(s)``, ``blue_text(s)``, ``cyan_text(s)`` - -+ ``get_path(mod_name,parent_path=None)`` --- return path of a module - relative to parent_path when given. Handles also ``__main__`` and - ``__builtin__`` modules. - -+ ``allpath(name)`` --- replaces ``/`` with ``os.sep`` in ``name``. - -+ ``cxx_ext_match``, ``fortran_ext_match``, ``f90_ext_match``, - ``f90_module_name_match`` - -``numpy.distutils.system_info`` module --------------------------------------- - -+ ``get_info(name,notfound_action=0)`` -+ ``combine_paths(*args,**kws)`` -+ ``show_all()`` - -``numpy.distutils.cpuinfo`` module ----------------------------------- - -+ ``cpuinfo`` - -``numpy.distutils.log`` module ------------------------------- - -+ ``set_verbosity(v)`` - - -``numpy.distutils.exec_command`` module ---------------------------------------- - -+ ``get_pythonexe()`` -+ ``find_executable(exe, path=None)`` -+ ``exec_command( command, execute_in='', use_shell=None, use_tee=None, **env )`` - -The ``__init__.py`` file -'''''''''''''''''''''''' - -The header of a typical SciPy ``__init__.py`` is:: - - """ - Package docstring, typically with a brief description and function listing. - """ - - # import functions into module namespace - from .subpackage import * - ... - - __all__ = [s for s in dir() if not s.startswith('_')] - - from numpy.testing import Tester - test = Tester().test - bench = Tester().bench - -Extra features in NumPy Distutils -''''''''''''''''''''''''''''''''' - -Specifying config_fc options for libraries in setup.py script -------------------------------------------------------------- - -It is possible to specify config_fc options in setup.py scripts. -For example, using:: - - config.add_library('library', - sources=[...], - config_fc={'noopt':(__file__,1)}) - -will compile the ``library`` sources without optimization flags. - -It's recommended to specify only those config_fc options in such a way -that are compiler independent. 
-
-Getting extra Fortran 77 compiler options from source
------------------------------------------------------
-
-Some old Fortran codes need special compiler options in order to
-work correctly. In order to specify compiler options per source
-file, ``numpy.distutils`` Fortran compiler looks for the following
-pattern::
-
-  CF77FLAGS(<fcompiler type>) = <f77 flags>
-
-in the first 20 lines of the source and use the ``f77flags`` for
-specified type of the fcompiler (the first character ``C`` is optional).
-
-TODO: This feature can be easily extended for Fortran 90 codes as
-well. Let us know if you would need such a feature.
diff --git a/doc/HOWTO_RELEASE.rst b/doc/HOWTO_RELEASE.rst
index 53c3904703a4..d756a75a6bce 100644
--- a/doc/HOWTO_RELEASE.rst
+++ b/doc/HOWTO_RELEASE.rst
@@ -4,142 +4,113 @@ releases for NumPy.
 
 Current build and release info
 ==============================
 
-Useful info can be found in the following locations:
+Useful info can be found in `building-from-source` in the docs as well as in
+these three files:
 
-* **Source tree**
-
-  - `INSTALL.rst <https://github.com/numpy/numpy/blob/main/INSTALL.rst>`_
-  - `pavement.py <https://github.com/numpy/numpy/blob/main/pavement.py>`_
-
-* **NumPy docs**
-
-  - `HOWTO_RELEASE.rst <https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst>`_
-  - `RELEASE_WALKTHROUGH.rst <https://github.com/numpy/numpy/blob/main/doc/RELEASE_WALKTHROUGH.rst>`_
-  - `BRANCH_WALKTHROUGH.rst <https://github.com/numpy/numpy/blob/main/doc/BRANCH_WALKTHROUGH.rst>`_
+- `HOWTO_RELEASE.rst <https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst>`_
+- `RELEASE_WALKTHROUGH.rst <https://github.com/numpy/numpy/blob/main/doc/RELEASE_WALKTHROUGH.rst>`_
+- `BRANCH_WALKTHROUGH.rst <https://github.com/numpy/numpy/blob/main/doc/BRANCH_WALKTHROUGH.rst>`_
 
 Supported platforms and versions
 ================================
 
-:ref:`NEP 29 <NEP29>` outlines which Python versions
-are supported; For the first half of 2020, this will be Python >= 3.6. We test
-NumPy against all these versions every time we merge code to main. Binary
-installers may be available for a subset of these versions (see below).
+:ref:`NEP 29 <NEP29>` outlines which Python versions are supported *at a
+minimum*. We usually decide to keep support for a given Python version slightly
+longer than that minimum, to avoid giving other projects issues - this is at
+the discretion of the release manager.
 
-* **OS X**
+* **macOS**
 
-  OS X versions >= 10.9 are supported, for Python version support see
-  :ref:`NEP 29 <NEP29>`. We build binary wheels for OSX that are compatible with
-  Python.org Python, system Python, homebrew and macports - see this
-  `OSX wheel building summary `_
-  for details.
+  We aim to support the same set of macOS versions as are supported by
+  Python.org and `cibuildwheel`_ for any given Python version.
+  We build binary wheels for macOS that are compatible with common Python
+  installation methods, e.g., from python.org, ``python-build-standalone`` (the
+  ones ``uv`` installs), system Python, conda-forge, Homebrew and MacPorts.
 
 * **Windows**
 
   We build 32- and 64-bit wheels on Windows. Windows 7, 8 and 10 are supported.
-  We build NumPy using the `mingw-w64 toolchain`_, `cibuildwheels`_ and GitHub
-  actions.
+  We build NumPy using the most convenient compilers, which are (as of Aug
+  2025) MSVC for x86/x86-64 and Clang-cl for arm64, `cibuildwheel`_ and GitHub
+  Actions.
 
-.. _cibuildwheels: https://cibuildwheel.readthedocs.io/en/stable/
+.. _cibuildwheel: https://cibuildwheel.readthedocs.io/en/stable/
 
 * **Linux**
 
-  We build and ship `manylinux_2_28 `_
-  wheels for NumPy. Many Linux distributions include their own binary builds
-  of NumPy.
+  We build and ship ``manylinux`` and ``musllinux`` wheels for x86-64 and
+  aarch64 platforms on PyPI. Wheels for 32-bit platforms are not currently
+  provided. We aim to support the lowest non-EOL versions, and upgrade roughly
+  in sync with `cibuildwheel`_. See
+  `pypa/manylinux <https://github.com/pypa/manylinux>`__ and
+  `this distro compatibility table `__
+  for more details.
-* **BSD / Solaris** +* **BSD / Solaris / AIX** - No binaries are provided, but successful builds on Solaris and BSD have been - reported. + No binary wheels are provided on PyPI, however we expect building from source + on these platforms to work fine. -Tool chain +Toolchains ========== -We build all our wheels on cloud infrastructure - so this list of compilers is -for information and debugging builds locally. See the ``.travis.yml`` script -in the `numpy wheels`_ repo for an outdated source of the build recipes using -multibuild. - -.. _numpy wheels : https://github.com/MacPython/numpy-wheels - -Compilers ---------- -The same gcc version is used as the one with which Python itself is built on -each platform. At the moment this means: - -- OS X builds on travis currently use `clang`. It appears that binary wheels - for OSX >= 10.6 can be safely built from the travis-ci OSX 10.9 VMs - when building against the Python from the Python.org installers; -- Windows builds use the `mingw-w64 toolchain`_; -- Manylinux2014 wheels use the gcc provided on the Manylinux docker images. - -You will need Cython for building the binaries. Cython compiles the ``.pyx`` -files in the NumPy distribution to ``.c`` files. - -.. _mingw-w64 toolchain : https://mingwpy.github.io +For building wheels, we use the following toolchains: + +- Linux: we use the default compilers in the ``manylinux``/``musllinux`` Docker + images, which is usually a relatively recent GCC version. +- macOS: we use the Apple Clang compilers and XCode version installed on the + GitHub Actions runner image. +- Windows: for x86 and x86-64 we use the default MSVC and Visual Studio + toolchain installed on the relevant GitHub actions runner image. Note that in + the past it has sometimes been necessary to use an older toolchain to avoid + causing problems through the static ``libnpymath`` library for SciPy - please + inspect the `numpy/numpy-release `__ + code and CI logs in case the exact version numbers need to be determined. + +For building from source, minimum compiler versions are tracked in the top-level +``meson.build`` file. OpenBLAS -------- -All the wheels link to a version of OpenBLAS_ supplied via the openblas-libs_ repo. -The shared object (or DLL) is shipped with in the wheel, renamed to prevent name +Most wheels link to a version of OpenBLAS_ supplied via the openblas-libs_ repo. +The shared object (or DLL) is shipped within the wheel, renamed to prevent name collisions with other OpenBLAS shared objects that may exist in the filesystem. -.. _OpenBLAS: https://github.com/xianyi/OpenBLAS +.. _OpenBLAS: https://github.com/OpenMathLib/OpenBLAS .. _openblas-libs: https://github.com/MacPython/openblas-libs - -Building source archives and wheels ------------------------------------ -The NumPy wheels and sdist are now built using cibuildwheel with -github actions. - - Building docs ------------- -We are no longer building ``PDF`` files. All that will be needed is - -- virtualenv (pip). - -The other requirements will be filled automatically during the documentation -build process. - +We are no longer building ``pdf`` files. The requirements for building the +``html`` docs are no different than for regular development. See the README of +the `numpy/doc `__ repository and the step by +step instructions in ``doc/RELEASE_WALKTHROUGH.rst`` for more details. Uploading to PyPI ----------------- -The only application needed for uploading is - -- twine (pip). - -You will also need a PyPI token, which is best kept on a keyring. 
See the -twine keyring_ documentation for how to do that. - -.. _keyring: https://twine.readthedocs.io/en/stable/#keyring-support - +Creating a release on PyPI and uploading wheels and sdist is automated in CI +and uses `PyPI's trusted publishing `__. +See the README in the `numpy/numpy-release `__ +repository and the step by step instructions in ``doc/RELEASE_WALKTHROUGH.rst`` +for more details. Generating author/PR lists -------------------------- You will need a personal access token ``_ -so that scripts can access the github NumPy repository. - -- gitpython (pip) -- pygithub (pip) +so that scripts can access the GitHub NumPy repository. With that token, the +author/PR changelog content can be generated by running ``spin changelog``. It +may require a few extra packages, like ``gitpython`` and ``pygithub``. What is released ================ -* **Wheels** - We currently support Python 3.10-3.13 on Windows, OSX, and Linux. - - * Windows: 32-bit and 64-bit wheels built using Github actions; - * OSX: x64_86 and arm64 OSX wheels built using Github actions; - * Linux: x64_86 and aarch64 Manylinux2014 wheels built using Github actions. - -* **Other** - Release notes and changelog +On PyPI we release wheels for a number of platforms (as discussed higher up), +and an sdist. -* **Source distribution** - We build source releases in the .tar.gz format. +On GitHub Releases we release the same sdist (because the source archives which +are autogenerated by GitHub itself aren't complete), as well as the release +notes and changelog. Release process @@ -147,30 +118,11 @@ Release process Agree on a release schedule --------------------------- -A typical release schedule is one beta, two release candidates and a final -release. It's best to discuss the timing on the mailing list first, in order -for people to get their commits in on time, get doc wiki edits merged, etc. -After a date is set, create a new maintenance/x.y.z branch, add new empty -release notes for the next version in the main branch and update the Trac -Milestones. - - -Make sure current branch builds a package correctly ---------------------------------------------------- -The CI builds wheels when a PR header begins with ``REL``. Your last -PR before releasing should be so marked and all the tests should pass. -You can also do:: - - git clean -fxdq - python setup.py bdist_wheel - python setup.py sdist - -For details of the build process itself, it is best to read the -Step-by-Step Directions below. - -.. note:: The following steps are repeated for the beta(s), release - candidates(s) and the final release. - +A typical release schedule for a feature release is two release candidates and +a final release. It's best to discuss the timing on the mailing list first, in +order for people to get their commits in on time. After a date is set, create a +new ``maintenance/x.y.z`` branch, add new empty release notes for the next version +in the main branch and update the Milestones on the issue tracker. Check deprecations ------------------ diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 702803172477..c8c1f129c0b2 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -1,8 +1,9 @@ -This is a walkthrough of the NumPy 2.1.0 release on Linux, modified for -building with GitHub Actions and cibuildwheels and uploading to the -`anaconda.org staging repository for NumPy `_. -The commands can be copied into the command line, but be sure to replace 2.1.0 -by the correct version. 
This should be read together with the +This is a walkthrough of the NumPy 2.4.0 release on Linux, which will be the +first feature release using the `numpy/numpy-release +`__ repository. + +The commands can be copied into the command line, but be sure to replace 2.4.0 +with the correct version. This should be read together with the :ref:`general release guide `. Facility preparation @@ -26,32 +27,33 @@ Prior to release Add/drop Python versions ------------------------ -When adding or dropping Python versions, three files need to be edited: - -- .github/workflows/wheels.yml # for github cibuildwheel -- tools/ci/cirrus_wheels.yml # for cibuildwheel aarch64/arm64 builds -- pyproject.toml # for classifier and minimum version check. - +When adding or dropping Python versions, multiple config and CI files need to +be edited in addition to changing the minimum version in ``pyproject.toml``. Make these changes in an ordinary PR against main and backport if necessary. -Add ``[wheel build]`` at the end of the title line of the commit summary so -that wheel builds will be run to test the changes. We currently release wheels -for new Python versions after the first Python rc once manylinux and -cibuildwheel support it. For Python 3.11 we were able to release within a week -of the rc1 announcement. +We currently release wheels for new Python versions after the first Python RC +once manylinux and cibuildwheel support that new Python version. Backport pull requests ---------------------- Changes that have been marked for this release must be backported to the -maintenance/2.1.x branch. +maintenance/2.4.x branch. + -Update 2.1.0 milestones +Update 2.4.0 milestones ----------------------- -Look at the issues/prs with 2.1.0 milestones and either push them off to a +Look at the issues/prs with 2.4.0 milestones and either push them off to a later version, or maybe remove the milestone. You may need to add a milestone. +Check the numpy-release repo +---------------------------- + +The things to check are the ``cibuildwheel`` version in +``.github/workflows/wheels.yml`` and the ``openblas`` versions in +``openblas_requirements.txt``. + Make a release PR ================= @@ -64,14 +66,13 @@ Four documents usually need to be updated or created for the release PR: - The ``pyproject.toml`` file These changes should be made in an ordinary PR against the maintenance branch. -The commit heading should contain a ``[wheel build]`` directive to test if the -wheels build. Other small, miscellaneous fixes may be part of this PR. The -commit message might be something like:: +Other small, miscellaneous fixes may be part of this PR. The commit message +might be something like:: - REL: Prepare for the NumPy 2.1.0 release [wheel build] + REL: Prepare for the NumPy 2.4.0 release - - Create 2.1.0-changelog.rst. - - Update 2.1.0-notes.rst. + - Create 2.4.0-changelog.rst. + - Update 2.4.0-notes.rst. - Update .mailmap. - Update pyproject.toml @@ -79,19 +80,18 @@ commit message might be something like:: Set the release version ----------------------- -Check the ``pyproject.toml`` file and set the release version if needed:: +Check the ``pyproject.toml`` file and set the release version and update the +classifier if needed:: $ gvim pyproject.toml -Check the ``pavement.py`` and ``doc/source/release.rst`` files --------------------------------------------------------------- +Check the ``doc/source/release.rst`` file +----------------------------------------- -Check that the ``pavement.py`` file points to the correct release notes. 
It should
-have been updated after the last release, but if not, fix it now. Also make
-sure that the notes have an entry in the ``release.rst`` file::
+Make sure that the release notes have an entry in the ``release.rst`` file::
 
-    $ gvim pavement.py doc/source/release.rst
+    $ gvim doc/source/release.rst
 
 
 Generate the changelog
@@ -99,16 +99,15 @@
 
 The changelog is generated using the changelog tool::
 
-    $ spin changelog $GITHUB v2.0.0..maintenance/2.1.x > doc/changelog/2.1.0-changelog.rst
+    $ spin changelog $GITHUB v2.3.0..maintenance/2.4.x > doc/changelog/2.4.0-changelog.rst
 
 where ``GITHUB`` contains your GitHub access token. The text will need to be
-checked for non-standard contributor names and dependabot entries removed. It
-is also a good idea to remove any links that may be present in the PR titles
-as they don't translate well to markdown, replace them with monospaced text. The
-non-standard contributor names should be fixed by updating the ``.mailmap``
-file, which is a lot of work. It is best to make several trial runs before
-reaching this point and ping the malefactors using a GitHub issue to get the
-needed information.
+checked for non-standard contributor names. It is also a good idea to remove
+any links that may be present in the PR titles, as they don't translate well to
+Markdown; replace them with monospaced text. The non-standard contributor names
+should be fixed by updating the ``.mailmap`` file, which is a lot of work. It
+is best to make several trial runs before reaching this point and ping the
+malefactors using a GitHub issue to get the needed information.
 
 
 Finish the release notes
@@ -119,7 +118,7 @@
 run ``spin notes``, which will incorporate the snippets into the
 ``doc/source/release/notes-towncrier.rst`` file and delete the snippets::
 
     $ spin notes
-    $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.1.0-notes.rst
+    $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.4.0-notes.rst
 
 Once the ``notes-towncrier`` contents have been incorporated into the release
 notes, the ``.. include:: notes-towncrier.rst`` directive can be removed. The notes
@@ -129,6 +128,20 @@ may also be appended, but not for the initial release as it is too long.
 Check previous release notes to see how this is done.
 
 
+Test the wheel builds
+---------------------
+
+After the release PR is merged, go to the ``numpy-release`` repository in your
+browser and manually trigger the workflow on the ``maintenance/2.4.x`` branch
+using the ``Run workflow`` button in ``actions``. Make sure that the upload
+target is ``none`` in the *environment* dropdown. The wheels take about 1 hour
+to build, but sometimes GitHub is very slow. If some wheel builds fail for
+unrelated reasons, you can re-run them as normal in the GitHub Actions UI with
+``re-run failed``. After the wheels are built, review the results, checking that
+the number of artifacts is correct, the wheel names are as expected, etc. If
+everything looks good, proceed with the release.
+
+
 Release walkthrough
 ===================
@@ -139,14 +152,14 @@ cloned it locally. You can also edit ``.git/config`` and add
 ``upstream`` if it isn't already present.
 
 
-1. Prepare the release commit
------------------------------
+1. Tag the release commit
+-------------------------
 
 Checkout the branch for the release, make sure it is up to date, and clean
 the repository::
 
-    $ git checkout maintenance/2.1.x
-    $ git pull upstream maintenance/2.1.x
+    $ git checkout maintenance/2.4.x
+    $ git pull upstream maintenance/2.4.x
     $ git submodule update
     $ git clean -xdfq
 
@@ -157,104 +170,60 @@ Sanity check::
 
 Tag the release and push the tag. This requires write permission for the
 numpy repository::
 
-    $ git tag -a -s v2.1.0 -m"NumPy 2.1.0 release"
-    $ git push upstream v2.1.0
+    $ git tag -a -s v2.4.0 -m"NumPy 2.4.0 release"
+    $ git push upstream v2.4.0
 
 If you need to delete the tag due to error::
 
-    $ git tag -d v2.1.0
-    $ git push --delete upstream v2.1.0
-
-
-2. Build wheels
----------------
-
-Tagging the build at the beginning of this process will trigger a wheel build
-via cibuildwheel and upload wheels and an sdist to the staging repo. The CI run
-on github actions (for all x86-based and macOS arm64 wheels) takes about 1 1/4
-hours. The CI runs on cirrus (for aarch64 and M1) take less time. You can check
-for uploaded files at the `staging repository`_, but note that it is not
-closely synched with what you see of the running jobs.
-
-If you wish to manually trigger a wheel build, you can do so:
-
-- On github actions -> `Wheel builder`_ there is a "Run workflow" button, click
  on it and choose the tag to build
-- On Cirrus we don't currently have an easy way to manually trigger builds and
-  uploads.
+    $ git tag -d v2.4.0
+    $ git push --delete upstream v2.4.0
 
-If a wheel build fails for unrelated reasons, you can rerun it individually:
-- On github actions select `Wheel builder`_ click on the commit that contains
-  the build you want to rerun. On the left there is a list of wheel builds,
-  select the one you want to rerun and on the resulting page hit the
-  counterclockwise arrows button.
-- On cirrus, log into cirrusci, look for the v2.1.0 tag and rerun the failed jobs.
-
-.. _`staging repository`: https://anaconda.org/multibuild-wheels-staging/numpy/files
-.. _`Wheel builder`: https://github.com/numpy/numpy/actions/workflows/wheels.yml
-
-
-3. Download wheels
-------------------
-
-When the wheels have all been successfully built and staged, download them from the
-Anaconda staging directory using the ``tools/download-wheels.py`` script::
-
-    $ cd ../numpy
-    $ mkdir -p release/installers
-    $ python3 tools/download-wheels.py 2.1.0
-
-
-4. Generate the README files
-----------------------------
-
-This needs to be done after all installers are downloaded, but before the pavement
-file is updated for continued development::
-
-    $ paver write_release
 
+2. Build wheels and sdist
+-------------------------
+Go to the ``numpy-release`` repository in your browser and manually trigger the
+workflow on the ``maintenance/2.4.x`` branch using the ``Run workflow`` button
+in ``actions``. Make sure that the upload target is ``pypi`` in the
+*environment* dropdown. The wheels take about 1 hour to build, but sometimes
+GitHub is very slow. If some wheel builds fail for unrelated reasons, you can
+re-run them as normal in the GitHub Actions UI with ``re-run failed``. After
+the wheels are built, review the results, checking that the number of artifacts
+is correct, the wheel names are as expected, etc. If everything looks good,
+trigger the upload.
 
-5. Upload to PyPI
------------------
-Upload to PyPI using ``twine``. A recent version of ``twine`` of is needed
-after recent PyPI changes, version ``3.4.1`` was used here::
 
+3. Upload files to GitHub Releases
+3. Upload files to GitHub Releases +---------------------------------- - $ cd ../numpy - $ twine upload release/installers/*.whl - $ twine upload release/installers/*.gz # Upload last. +Go to `<https://github.com/numpy/numpy/releases>`_; there should be a ``v2.4.0`` +tag. Click on it, hit the edit button for that tag, and update the title to +"v2.4.0 (<date>)". There are two ways to add files: an editable text +window and binary uploads. The text window needs Markdown, so translate the +release notes from rst to md:: -If one of the commands breaks in the middle, you may need to selectively upload -the remaining files because PyPI does not allow the same file to be uploaded -twice. The source file should be uploaded last to avoid synchronization -problems that might occur if pip users access the files while this is in -process, causing pip to build from source rather than downloading a binary -wheel. PyPI only allows a single source distribution, here we have -chosen the zip archive. + $ python tools/write_release.py 2.4.0 +This will create a ``release/README.md`` file that you can edit. Check the +result to see that it looks correct. Things that may need fixing: wrapped lines +that need unwrapping and links that should be changed to monospaced text. Then +copy the contents to the clipboard and paste them into the text window. It may +take several tries to get it to look right. Then: -6. Upload files to GitHub -------------------------- -Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v2.1.0 -tag``, click on it and hit the edit button for that tag and update the title to -'v2.1.0 (<date>). There are two ways to add files, using an editable text -window and as binary uploads. Start by editing the ``release/README.md`` that -is translated from the rst version using pandoc. Things that will need fixing: -PR lines from the changelog, if included, are wrapped and need unwrapping, -links should be changed to monospaced text. Then copy the contents to the -clipboard and paste them into the text window. It may take several tries to get -it look right. Then -- Upload ``release/installers/numpy-2.1.0.tar.gz`` as a binary file. +- Download the sdist (``numpy-2.4.0.tar.gz``) from PyPI and upload it to GitHub + as a binary file. You cannot do this using pip. - Upload ``release/README.rst`` as a binary file. -- Upload ``doc/changelog/2.1.0-changelog.rst`` as a binary file. +- Upload ``doc/changelog/2.4.0-changelog.rst`` as a binary file. -- Check the pre-release button if this is a pre-releases. +- Check the pre-release button if this is a pre-release. -- Hit the ``{Publish,Update} release`` button at the bottom. +- Hit the ``Publish release`` button at the bottom. + +.. note:: + Please ensure that all 3 uploaded files are present and that the + release text is complete. Releases are configured to be immutable, so + mistakes can't (easily) be fixed anymore.
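+If you prefer the command line, the binary files can also be attached with the
+GitHub CLI rather than the browser widget (a sketch, assuming ``gh`` is
+installed and authenticated; the web UI above is the documented path)::
+
+    $ gh release upload v2.4.0 numpy-2.4.0.tar.gz release/README.rst \
+          doc/changelog/2.4.0-changelog.rst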
-7. Upload documents to numpy.org (skip for prereleases) +4. Upload documents to numpy.org (skip for prereleases) ------------------------------------------------------- .. note:: You will need a GitHub personal access token to push the update. @@ -264,7 +233,7 @@ and most patch releases. ``make merge-doc`` clones the ``numpy/doc`` repo into ``doc/build/merge`` and updates it with the new documentation:: $ git clean -xdfq - $ git co v2.1.0 + $ git checkout v2.4.0 $ rm -rf doc/build # want version to be current $ python -m spin docs merge-doc --build $ pushd doc/build/merge @@ -291,45 +260,46 @@ from ``numpy.org``:: Update the stable link and update:: - $ ln -sfn 2.1 stable + $ ln -sfn 2.4 stable $ ls -l # check the link Once everything seems satisfactory, update, commit and upload the changes:: - $ git commit -a -m"Add documentation for v2.1.0" + $ git commit -a -m"Add documentation for v2.4.0" $ git push git@github.com:numpy/doc $ popd -8. Reset the maintenance branch into a development state (skip for prereleases) +5. Reset the maintenance branch into a development state (skip for prereleases) ------------------------------------------------------------------------------- Create release notes for next release and edit them to set the version. These notes will be a skeleton and have little content:: - $ git checkout -b begin-2.1.1 maintenance/2.1.x - $ cp doc/source/release/template.rst doc/source/release/2.1.1-notes.rst - $ gvim doc/source/release/2.1.1-notes.rst - $ git add doc/source/release/2.1.1-notes.rst + $ git checkout -b begin-2.4.1 maintenance/2.4.x + $ cp doc/source/release/template.rst doc/source/release/2.4.1-notes.rst + $ gvim doc/source/release/2.4.1-notes.rst + $ git add doc/source/release/2.4.1-notes.rst -Add new release notes to the documentation release list and update the -``RELEASE_NOTES`` variable in ``pavement.py``:: +Add a link to the new release notes:: - $ gvim doc/source/release.rst pavement.py + $ gvim doc/source/release.rst Update the ``version`` in ``pyproject.toml``:: $ gvim pyproject.toml -Commit the result:: +Commit the result, then edit the commit message so that it notes the files in +the commit and adds a line ``[skip cirrus] [skip actions]``, then push:: - $ git commit -a -m"MAINT: Prepare 2.1.x for further development" + $ git commit -a -m"MAINT: Prepare 2.4.x for further development" + $ git rebase -i HEAD^ $ git push origin HEAD Go to GitHub and make a PR. It should be merged quickly. -9. Announce the release on numpy.org (skip for prereleases) +6. Announce the release on numpy.org (skip for prereleases) ----------------------------------------------------------- This assumes that you have forked `<https://github.com/numpy/numpy.org>`_:: $ cd ../numpy.org $ git checkout main $ git pull upstream main - $ git checkout -b announce-numpy-2.1.0 + $ git checkout -b announce-numpy-2.4.0 $ gvim content/en/news.md - For all releases, go to the bottom of the page and add a one line link. Look to the previous links for example. - For the ``*.0`` release in a cycle, add a new section at the top with a short description of the new features and point the news link to it. +- Edit the ``newsHeader`` and ``date`` fields at the top of ``news.md``. +- Also edit the ``buttonText`` on line 14 in ``content/en/config.yaml``. commit and push:: - $ git commit -a -m"announce the NumPy 2.1.0 release" + $ git commit -a -m"announce the NumPy 2.4.0 release" $ git push origin HEAD Go to GitHub and make a PR.
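+The site is built with Hugo, so you can preview the rendered news entry locally
+before opening the PR (a sketch; check the ``numpy.org`` repository README for
+the supported workflow)::
+
+    $ hugo server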
-10. Announce to mailing lists ------------------------------ +7. Announce to mailing lists +---------------------------- -The release should be announced on the numpy-discussion, scipy-devel, and +The release should be announced on the numpy-discussion and python-announce-list mailing lists. Look at previous announcements for the basic template. The contributor and PR lists are the same as generated for the -release notes above. If you crosspost, make sure that python-announce-list is +release notes above. If you cross-post, make sure that python-announce-list is BCC so that replies will not be sent to that list. -11. Post-release update main (skip for prereleases) ---------------------------------------------------- +8. Post-release update main (skip for prereleases) +-------------------------------------------------- Checkout main and forward port the documentation changes. You may also want to update these notes if procedures have changed or improved:: - $ git checkout -b post-2.1.0-release-update main - $ git checkout maintenance/2.1.x doc/source/release/2.1.0-notes.rst - $ git checkout maintenance/2.1.x doc/changelog/2.1.0-changelog.rst - $ git checkout maintenance/2.1.x .mailmap # only if updated for release. + $ git checkout -b post-2.4.0-release-update main + $ git checkout maintenance/2.4.x doc/source/release/2.4.0-notes.rst + $ git checkout maintenance/2.4.x doc/changelog/2.4.0-changelog.rst + $ git checkout maintenance/2.4.x .mailmap # only if updated for release. $ gvim doc/source/release.rst # Add link to new notes $ git status # check status before commit - $ git commit -a -m"MAINT: Update main after 2.1.0 release." + $ git commit -a -m"MAINT: Update main after 2.4.0 release." $ git push origin HEAD Go to GitHub and make a PR. diff --git a/doc/TESTS.rst b/doc/TESTS.rst index ee8a8b4b07e1..803625e727ae 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -74,6 +74,31 @@ Testing a subset of NumPy:: For detailed info on testing, see :ref:`testing-builds` +Running tests in multiple threads +--------------------------------- + +To help with stress testing NumPy for thread safety, the test suite can be run under +`pytest-run-parallel`_. To install ``pytest-run-parallel``:: + + $ pip install pytest-run-parallel + +To run the test suite in multiple threads:: + + $ spin test -p auto # have pytest-run-parallel detect the number of available cores + $ spin test -p 4 # run each test under 4 threads + $ spin test -p auto -- --skip-thread-unsafe=true # run ONLY tests that are thread-safe + +When you write new tests, it is worth checking that they do not fail +under ``pytest-run-parallel``, since the CI jobs make use of it. Some tips on how to +write thread-safe tests can be found `here <#writing-thread-safe-tests>`_. + +.. note:: + + Ideally you should run ``pytest-run-parallel`` using a `free-threaded build of Python + `_ that is 3.14 or + higher. If you decide to use a version of Python that is not free-threaded, you will + need to set the environment variables ``PYTHON_CONTEXT_AWARE_WARNINGS`` and + ``PYTHON_THREAD_INHERIT_CONTEXT`` to 1. Running doctests ---------------- @@ -118,8 +143,6 @@ module called ``test_yyy.py``. If you only need to test one aspect of
If you only need to test one aspect of More often, we need to group a number of tests together, so we create a test class:: - import pytest - # import xxx symbols from numpy.xxx.yyy import zzz import pytest @@ -209,36 +232,34 @@ Similarly for methods:: def test_simple(self): assert_(zzz() == 'Hello from zzz') -Easier setup and teardown functions / methods ---------------------------------------------- - -Testing looks for module-level or class method-level setup and teardown -functions by name; thus:: - - def setup_module(): - """Module-level setup""" - print('doing setup') - - def teardown_module(): - """Module-level teardown""" - print('doing teardown') +Setup and teardown methods +-------------------------- +NumPy originally used xunit setup and teardown, a feature of `pytest`. We now encourage +the usage of setup and teardown methods that are called explicitly by the tests that +need them:: class TestMe: - def setup_method(self): - """Class-level setup""" + def setup(self): print('doing setup') + return 1 - def teardown_method(): - """Class-level teardown""" + def teardown(self): print('doing teardown') + def test_xyz(self): + x = self.setup() + assert x == 1 + self.teardown() + +This approach is thread-safe, ensuring tests can run under ``pytest-run-parallel``. +Using pytest setup fixtures (such as xunit setup methods) is generally not thread-safe +and will likely cause thread-safety test failures. -Setup and teardown functions to functions and methods are known as "fixtures", -and they should be used sparingly. ``pytest`` supports more general fixture at various scopes which may be used -automatically via special arguments. For example, the special argument name -``tmpdir`` is used in test to create a temporary directory. +automatically via special arguments. For example, the special argument name +``tmp_path`` is used in tests to create temporary directories. However, +fixtures should be used sparingly. Parametric tests ---------------- @@ -381,9 +402,9 @@ Tests on random data Tests on random data are good, but since test failures are meant to expose new bugs or regressions, a test that passes most of the time but fails occasionally with no code changes is not helpful. Make the random data -deterministic by setting the random number seed before generating it. Use -either Python's ``random.seed(some_number)`` or NumPy's -``numpy.random.seed(some_number)``, depending on the source of random numbers. +deterministic by setting the random number seed before generating it. +Use ``rng = numpy.random.RandomState(some_number)`` to set a seed on a +local instance of `numpy.random.RandomState`. Alternatively, you can use `Hypothesis`_ to generate arbitrary data. Hypothesis manages both Python's and Numpy's random seeds for you, and @@ -394,6 +415,44 @@ The advantages over random generation include tools to replay and share failures without requiring a fixed seed, reporting *minimal* examples for each failure, and better-than-naive-random techniques for triggering bugs. +Writing thread-safe tests +------------------------- + +Writing thread-safe tests may require some trial-and-error. Generally you should +follow the guidelines stated so far, especially when it comes to `setup methods +<#setup-and-teardown-methods>`_ and `seeding random data <#tests-on-random-data>`_. +Explicit setup and the usage of local RNG are thread-safe practices. Here are tips +for some other common problems you may run into. + +Using ``pytest.mark.parametrize`` may occasionally cause thread-safety issues. 
+Using ``pytest.mark.parametrize`` may occasionally cause thread-safety issues. +To fix this, use ``copy()`` to make a local copy of any mutable parameter +instead of mutating the shared object (immutable parameters such as scalars +and dtypes need no copy):: + + @pytest.mark.parametrize('shape', [[3, 4], [10, 25]]) + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_solve(shape, dtype): + shape = shape.copy() # each thread mutates only its own copy + # use the copied value instead of the shared parameter + ... + +If you are testing something that is inherently thread-unsafe, you can label your +test with ``pytest.mark.thread_unsafe`` so that it will run under a single thread +and not cause test failures:: + + @pytest.mark.thread_unsafe(reason="reason this test is thread-unsafe") + def test_thread_unsafe(): + ... + +Some examples of what should be labeled as thread-unsafe: + +- Usage of ``sys.stdout`` and ``sys.stderr`` +- Mutation of global data, like docstrings, modules, garbage collectors, etc. +- Tests that require a lot of memory, since they could cause crashes. + +Additionally, some ``pytest`` fixtures are thread-unsafe, such as ``monkeypatch`` and +``capsys``. However, ``pytest-run-parallel`` will automatically mark these as +thread-unsafe if you decide to use them. Some fixtures have been patched to be +thread-safe, like ``tmp_path``. Documentation for ``numpy.test`` -------------------------------- @@ -406,3 +465,4 @@ Documentation for ``numpy.test`` .. _Hypothesis: https://hypothesis.readthedocs.io/en/latest/ .. _vscode: https://code.visualstudio.com/docs/python/testing#_enable-a-test-framework .. _pycharm: https://www.jetbrains.com/help/pycharm/testing-your-first-python-application.html +.. _pytest-run-parallel: https://github.com/Quansight-Labs/pytest-run-parallel diff --git a/doc/changelog/1.21.5-changelog.rst b/doc/changelog/1.21.5-changelog.rst index acd3599d48ef..04ff638d42a3 100644 --- a/doc/changelog/1.21.5-changelog.rst +++ b/doc/changelog/1.21.5-changelog.rst @@ -22,7 +22,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... -* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/changelog/2.3.0-changelog.rst b/doc/changelog/2.3.0-changelog.rst new file mode 100644 index 000000000000..7ca672ba8dbf --- /dev/null +++ b/doc/changelog/2.3.0-changelog.rst @@ -0,0 +1,704 @@ + +Contributors +============ + +A total of 134 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* !EarlMilktea + +* !amotzop + +* !fengluoqiuwu +* !h-vetinari +* !karl3wm + +* !partev +* !samir539 + +* !wenlong2 + +* Aarni Koskela + +* Abdu Zoghbi + +* Abhishek Kumar +* Adam J. Stewart +* Aditi Juneja +* Adrin Jalali +* Agriya Khetarpal +* Alicia Boya García + +* Andrej Zhilenkov +* Andrew Nelson +* Angus Gibson + +* Antonio Rech Santos + +* Ari Gato + +* Arnaud Tremblay + +* Arvid Bessen + +* Baskar Gopinath + +* Carlos Martin + +* Charles Harris +* Chris Navarro + +* Chris Sidebottom +* Christian Lorentzen +* Christine P.
Chai + +* Christopher Sidebottom +* ClÊment Robert +* Colin Gilgenbach + +* Craig Peters + +* CÊdric Hannotier +* Daniel Hrisca +* Derek Homeier +* Diego Baldassar + +* Dimitri Papadopoulos Orfanos +* Eoghan O'Connell + +* Eric Larson +* Ernst Peng + +* Evgeni Burovski +* Filipe Laíns +* François Rozet + +* François de Coatpont + +* GUAN MING +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Guido Imperiale + +* Gyeongjae Choi + +* Halle Loveday + +* Hannah Wheeler + +* Hao Chen + +* Harmen Stoppels + +* Hin-Tak Leung + +* Ian DesJardin + +* Ihar Hrachyshka + +* Ilhan Polat +* Inessa Pawson +* J. Steven Dodge + +* Jake VanderPlas +* Jiachen An + +* Jiuding Tan (谭九éŧŽ) +* Joe Rickerby + +* John Kirkham +* John Stilley + +* Jonathan Albrecht + +* Joren Hammudoglu +* Kai Germaschewski + +* Krishna Bindumadhavan + +* Lucas Colley +* Luka Krmpotić + +* Lysandros Nikolaou +* Maanas Arora +* Makima C. Yang + +* Marco Barbosa + +* Marco Edward Gorelli + +* Mark Harfouche +* Marten van Kerkwijk +* Mateusz SokÃŗÅ‚ +* Matt Haberland +* Matthew Brett +* Matthew Goldsberry + +* Matthew Sterrett +* Matthias Diener +* Matthieu Darbois +* Matti Picus +* Melissa Weber Mendonça +* Michael Siebert +* Mike O'Brien + +* Mohammed Abdul Rahman + +* Mugundan Selvanayagam + +* Musharaf Aijaz Baba + +* Musharraffaijaz + +* Nathan Goldbaum +* Nicholas Christensen + +* Nitish Satyavolu + +* Omid Rajaei +* PTUsumit + +* Peter Hawkins +* Peyton Murray +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Rob Timpe + +* Robert Kern +* Rohit Goswami +* Ross Barnowski +* Roy Smart +* Saransh Chopra +* Saraswathy Kalaiselvan + +* Sayed Adel +* Sebastian Berg +* Shantanu Jain + +* Shashwat Pandey + +* Shi Entong + +* Simon Altrogge +* Stan Ulbrych +* Thomas A Caswell +* ThÊotime Grohens + +* Tyler Reddy +* WANG Xuerui + +* WEN Hao + +* Wang Yang (杨æ—ē) + +* Warren Weckesser +* Warrick Ball +* William Andrea +* Yakov Danishevsky + +* Yichi Zhang + +* Yuvraj Pradhan + +* dependabot[bot] +* hfloveday12 + + +Pull requests merged +==================== + +A total of 556 pull requests were merged for this release. + +* `#22718 `__: DOC: Add docs on using GitHub Codespaces for NumPy development +* `#25675 `__: ENH: add matvec and vecmat gufuncs +* `#25934 `__: ENH: Convert tanh from C universal intrinsics to C++ using Highway +* `#25991 `__: ENH: Optimize polyutils as_series +* `#26018 `__: ENH add hash based unique +* `#26745 `__: ENH, DOC: Add support for interactive examples for NumPy with... +* `#26958 `__: BUG: index overlap copy +* `#27288 `__: BUG: Scalar array comparison should return np.bool +* `#27300 `__: CI: pycodestyle → ruff +* `#27309 `__: MNT: Enforce ruff/Pyflakes rules (F) +* `#27324 `__: DOC: Removing module name from by-topic docs +* `#27343 `__: ENH: Add support for flat indexing on flat iterator +* `#27404 `__: DOC: document type promotion with Python types +* `#27522 `__: ENH: Cleanup npy_find_array_wrap +* `#27523 `__: ENH: Improve performance of np.count_nonzero for float arrays +* `#27648 `__: MAINT: Fix the code style to our C-Style-Guide +* `#27738 `__: DEP: testing: disable deprecated use of keywords x/y +* `#27784 `__: BUG: ``sinc``\ : fix underflow for float16 +* `#27789 `__: ENH: Implement np.strings.slice as a gufunc +* `#27819 `__: CI: add windows free-threaded CI +* `#27823 `__: BEG, MAINT: Begin NumPy 2.3.0 development. +* `#27824 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27826 `__: CI: update circleci to python3.11.10, limit parallel builds. 
+* `#27827 `__: CI: skip ninja installation in linux_qemu workflows +* `#27830 `__: ENH: speedup evaluation of numpy.polynomial.legendre.legval. +* `#27839 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27841 `__: BUG: Never negate strides in reductions (for now) +* `#27847 `__: MAINT: Bump pypa/cibuildwheel from 2.21.3 to 2.22.0 +* `#27848 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.1 to 2.0.2 +* `#27850 `__: DOC: Correct versionadded for vecmat and matvec. +* `#27854 `__: MAINT: Use mask_store instead of store for compiler bug workaround +* `#27856 `__: SIMD: add lsx optimization for loongarch, and add Qemu tests +* `#27858 `__: DOC: Fix typo +* `#27860 `__: MAINT: Add helper for static or heap allocated scratch space +* `#27862 `__: MAINT: Drop Python 3.10 support. +* `#27864 `__: ENH: stack custom multiarray import exception with the original... +* `#27868 `__: BUG: fix importing numpy in Python's optimized mode +* `#27869 `__: TYP: Fix ``np.interp`` signature for scalar types +* `#27875 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27877 `__: ENH: Refactor ``__qualname__`` across API +* `#27878 `__: DOC: Fix double import in docs +* `#27879 `__: DEV: Add venv files to .gitignore +* `#27883 `__: MAINT,ENH: Reorganize buffered iteration setup +* `#27884 `__: ENH: Remove unnecessary list collection +* `#27886 `__: MAINT: Move uint aligned check to actual transfer function setup +* `#27887 `__: MAINT: A few other small nditer fixes +* `#27896 `__: PERF: improve multithreaded ufunc scaling +* `#27897 `__: MAINT: Bump github/codeql-action from 3.27.5 to 3.27.6 +* `#27898 `__: MAINT: Remove ``25675.new_feature.rst`` snippet. +* `#27899 `__: TST: add timeouts for github actions tests and wheel builds. +* `#27901 `__: MAINT: simplify power fast path logic +* `#27910 `__: MAINT: Make qualname tests more specific and fix code where needed +* `#27914 `__: DOC: Remove 27896-snippet. +* `#27915 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 +* `#27917 `__: CI: Use hashes in specifying some actions. +* `#27920 `__: DOC: Fix invalid URL in the index.rst file. +* `#27921 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#27922 `__: MAINT: Move user pointers out of axisdata and simplify iternext +* `#27923 `__: ENH: Add cython wrappers for NpyString API +* `#27927 `__: DOC: Use internal/intersphinx links for neps. +* `#27930 `__: MAINT: Fix cirrus MacOs wheel builds [wheel build] +* `#27931 `__: CI: audit with zizmor +* `#27933 `__: BUG: fix building numpy on musl s390x +* `#27936 `__: MAINT: Update main after 2.2.0 release. +* `#27940 `__: BUG: Fix potential inconsistent behaviour for high-demnsional... 
+* `#27943 `__: TEST: cleanups +* `#27947 `__: BUG:fix compile error libatomic link test to meson.build +* `#27955 `__: BUG: fix use-after-free error in npy_hashtable.cpp +* `#27956 `__: BLD: add missing include to fix build with freethreading +* `#27962 `__: MAINT: Bump github/codeql-action from 3.27.6 to 3.27.7 +* `#27963 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.2 to 2.0.3 +* `#27967 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27973 `__: MAINT: Apply assorted ruff/flake8-pie rules (PIE) +* `#27974 `__: MAINT: Apply ruff/flake8-implicit-str-concat rules (ISC) +* `#27975 `__: MAINT: Apply ruff/flake8-comprehensions rules (C4) +* `#27976 `__: MAINT: Apply assorted ruff/flake8-pyi rules (PYI) +* `#27978 `__: MAINT: Apply assorted ruff/flake8-simplify rules (SIM) +* `#27981 `__: DOC: Document abi3 compat +* `#27992 `__: BUG: Fix segfault in stringdtype lexsort +* `#27996 `__: MAINT: Bump github/codeql-action from 3.27.7 to 3.27.9 +* `#27997 `__: MAINT: Remove unnecessary (and not safe in free-threaded) 1-D... +* `#27998 `__: API,MAINT: Make ``NpyIter_GetTransferFlags`` public and avoid... +* `#27999 `__: DOC, MAINT: Fix typos found by codespell +* `#28001 `__: DOC: Fix documentation for np.dtype.kind to include 'T' for StringDType +* `#28003 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... +* `#28004 `__: DOC: fix several doctests in dtype method docstrings +* `#28005 `__: BUG: Cython API was missing NPY_UINTP. +* `#28008 `__: BUG: Fix handling of matrix class in np.unique. +* `#28009 `__: TST: lib: Test average with object dtype and no weights. +* `#28013 `__: DOC: Fixed typos in development_advanced_debugging.rst +* `#28015 `__: MAINT: run ruff from the repository root +* `#28020 `__: CI: pin scipy-doctest to 1.5.1 +* `#28022 `__: MAINT: Add all submodules to ruff exclusion list. +* `#28023 `__: DOC: update to scipy-doctest 1.6.0 and fix tests +* `#28029 `__: MAINT: Bump actions/upload-artifact from 4.4.3 to 4.5.0 +* `#28032 `__: BUG,MAINT: Fix size bug in new alloc helper and use it in one... +* `#28033 `__: MAINT: Use userpointers to avoid NPY_MAXARGS in iternext() +* `#28035 `__: MAINT: Move ``lib.format`` and ``ctypeslib`` to submodules/private... +* `#28036 `__: Replace Twitter with X +* `#28039 `__: TYP: allow ``None`` in operand sequence of nditer +* `#28043 `__: BUG: Ensure einsum uses chunking (now that nditer doesn't) +* `#28051 `__: MAINT: Update main after 2.2.1 release. 
+* `#28053 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28056 `__: BUG: Fix f2py directives and ``--lower`` casing +* `#28058 `__: MAINT: Update ``spin lint`` command +* `#28060 `__: CI: fix check that GIL remains disabled for free-threaded wheels +* `#28065 `__: TYP: fix unnecessarily broad ``integer`` binop return types +* `#28069 `__: MAINT: update NEP 29 +* `#28073 `__: TYP: use mypy_primer to surface type checking regressions +* `#28074 `__: DOC: clarify np.gradient varargs requirement for axis parameter +* `#28075 `__: MAINT: Replace usage of fixed strides with inner strides in einsum +* `#28080 `__: ENH: Allow an arbitrary number of operands in nditer +* `#28081 `__: DOC: Add release snippets for iteration changes +* `#28083 `__: MAINT: Update LICENSE Copyright to 2025 +* `#28088 `__: BUG: update check for highway compiler support +* `#28089 `__: MAINT: bump ``mypy`` to ``1.14.1`` +* `#28090 `__: DOC:Fixed docstring with example use of np.select +* `#28091 `__: MAINT: Refactor stringdtype casts.c to use cpp templates +* `#28092 `__: MAINT: LoongArch: switch away from the __loongarch64 preprocessor... +* `#28094 `__: DOC: Fix documentation example for numpy.ma.masked +* `#28100 `__: DOC: Move linalg.outer from Decompositions to Matrix and vector... +* `#28101 `__: DOC: Fix sphinx markup in source/reference/random/extending.rst +* `#28102 `__: MAINT: update oldest supported GCC version from 8.4 to 9.3 +* `#28103 `__: MAINT: random: Call np.import_array() in _examples/cython/extending_distribution... +* `#28105 `__: ENH: support no-copy pickling for any array that can be transposed... +* `#28108 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... +* `#28109 `__: TYP: Fix the incorrect ``bool`` return type of ``issubdtype`` +* `#28110 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28116 `__: MAINT: random: Explicitly cast RAND_INT_MAX to double to avoid... +* `#28118 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28121 `__: MAINT: Correct NumPy 2.3 C-API versioning and version information +* `#28123 `__: BUG: move reduction initialization to ufunc initialization +* `#28127 `__: DOC: Improve slice docstrings +* `#28128 `__: BUG: Don't use C99 construct in import_array +* `#28129 `__: DEP: Deprecate ``numpy.typing.mypy_plugin`` +* `#28130 `__: CI: Fix mypy_primer comment workflow +* `#28133 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28134 `__: DEP: Deprecate ``numpy.typing.mypy_plugin``\ : The sequel +* `#28141 `__: DOC: Add instructions to build NumPy on WoA +* `#28142 `__: ENH: inline UTF-8 byte counter and make it branchless +* `#28144 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28148 `__: MAINT: Replace usage of outdated fixed strides with inner strides... +* `#28149 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28154 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28161 `__: DOC: Clarify ``np.loadtxt`` encoding argument default value in... +* `#28163 `__: MAINT: Avoid a redundant copy on ``a[...] = b`` +* `#28167 `__: DOC: fix formatting typo in basics.copies.rst +* `#28168 `__: TYP: Fix overlapping overloads issue in "2 in, 1 out" ufuncs +* `#28169 `__: TYP: preserve shape-type in ``ndarray.astype()`` +* `#28170 `__: TYP: Fix missing and spurious top-level exports +* `#28172 `__: BUG: Include Python-including headers first +* `#28179 `__: DOC: Remove duplicate wishlist tab in NEPs. 
+* `#28180 `__: DOC: Update links in HOWTO_RELEASE.rst +* `#28181 `__: CI: replace quansight-labs/setup-python with astral-sh/setup-uv +* `#28183 `__: MAINT: testing: specify python executable to use in extbuild +* `#28186 `__: MAINT: Update main after 2.2.2 release. +* `#28189 `__: MAINT, DOC: Add sphinx extension to allow svg images in PDF docs... +* `#28202 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... +* `#28203 `__: BUG: fix data race in ``np.repeat`` +* `#28206 `__: BUG: Remove unnecessary copying and casting from out array in... +* `#28210 `__: corrected the numpy logo visibility issues on darkmode with the... +* `#28211 `__: MAINT: Hide decorator from pytest traceback +* `#28214 `__: ENH: add pkg_config entrypoint +* `#28219 `__: DOC: Add versionadded directive for axis argument in trim_zeros... +* `#28221 `__: BUG: allclose does not warn for invalid value encountered in... +* `#28222 `__: MAINT: Update highway to latest +* `#28223 `__: MAINT: Add [[maybe_unused] to silence some warnings +* `#28226 `__: DOC: Clarify ``__array__`` protocol arguments +* `#28228 `__: BUG: handle case when StringDType na_object is nan in float to... +* `#28229 `__: DOC: Fix a typo in doc/source/dev/development_workflow.rst +* `#28230 `__: DOC: FIx a link in Roadmap +* `#28231 `__: DOC: Fix external links in the navbar of neps webpage +* `#28232 `__: BUG: Fix float128 FPE handling on ARM64 with Clang compiler +* `#28234 `__: BUG: Add cpp atomic support +* `#28235 `__: MAINT: Compile fix for clang-cl on WoA +* `#28241 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28242 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28246 `__: BLD: better fix for clang / ARM compiles +* `#28250 `__: dtype.__repr__: prefer __name__ for user-defined types. +* `#28252 `__: test_casting_unittests.py: remove tuple +* `#28254 `__: MAINT: expire deprecations +* `#28258 `__: DOC: Change the scientific page link in NumPy/MATLAB +* `#28259 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28262 `__: TYP: expire deprecations +* `#28263 `__: ENH: Add ARM64 (aarch64) CI testing +* `#28264 `__: DOC: Remove an invalid link in f2py-examples.rst +* `#28270 `__: TYP: Fixed missing typing information of set_printoptions +* `#28273 `__: CI: update sanitizer CI to use python compiled with ASAN and... 
+* `#28276 `__: BUG: fix incorrect bytes to StringDType coercion +* `#28279 `__: TYP: Fix scalar constructors +* `#28280 `__: TYP: stub ``numpy.matlib`` +* `#28281 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28288 `__: DOC: Correct a typo in Intel License URL +* `#28290 `__: BUG: fix race initializing legacy dtype casts +* `#28291 `__: BUG: Prevent class-bound attr mutation in ``lib._iotools.NameValidator`` +* `#28294 `__: MAINT: Enable building tanh on vector length agnostic architectures +* `#28295 `__: TYP: stub ``numpy._globals`` +* `#28296 `__: TYP: stub ``numpy._expired_attrs_2_0`` +* `#28297 `__: TYP: stub ``numpy._configtool`` and ``numpy._distributor_init`` +* `#28298 `__: TYP: stub ``numpy.lib._iotools`` +* `#28299 `__: TYP: stub ``lib.user_array`` and ``lib._user_array_impl`` +* `#28300 `__: TYP: stub ``lib.introspect`` +* `#28301 `__: TYP: stub ``lib.recfunctions`` +* `#28302 `__: TYP: fix and improve ``numpy._core.arrayprint`` +* `#28303 `__: TYP: stub ``lib._datasource`` and fix ``lib._npyio_impl`` +* `#28304 `__: DOC: Remove reference to python2 +* `#28307 `__: MAINT: bump ``mypy`` to ``1.15.0`` +* `#28312 `__: DOC: remove references to Python 2 +* `#28319 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28320 `__: MAINT: Update actions/cache and use hash. +* `#28323 `__: DOC: Correct a typo in Exception TooHardError +* `#28327 `__: TYP: fix positional- and keyword-only params in ``astype``\ ,... +* `#28328 `__: CI: Update FreeBSD base image in ``cirrus_arm.yml`` +* `#28330 `__: ENH: Ensure ``lib._format_impl.read_array`` handles file reading... +* `#28332 `__: BUG: avoid segfault in np._core.multiarray.scalar +* `#28335 `__: MAINT: Update main after 2.2.3 release. +* `#28336 `__: DOC: Update link to Anaconda Eclipse/PyDev documentation +* `#28338 `__: MAINT: use OpenBLAS 0.3.29 +* `#28339 `__: MAIN: Update c,c++ line length to 88 +* `#28343 `__: BUG: Fix ``linalg.norm`` to handle empty matrices correctly. +* `#28350 `__: DOC: fix typo +* `#28353 `__: DOC: Make numpy.fft a clickable link to module +* `#28355 `__: BUG: safer bincount casting +* `#28358 `__: MAINT: No need to check for check for FPEs in casts to/from object +* `#28359 `__: DOC: Make the first paragraph more concise in internals.rst +* `#28361 `__: BUG: Make np.nonzero threading safe +* `#28370 `__: DOC: Revise bullet point formatting in ``arrays.promotions.rst`` +* `#28382 `__: DOC: fix C API docs for ``PyArray_Size`` +* `#28383 `__: DOC: Added links to CTypes and CFFI in Numba +* `#28386 `__: MAINT: Extend the default ruff exclude files +* `#28387 `__: DOC: fix expected exception from StringDType without string coercion +* `#28390 `__: MAINT: speed up slow test under TSAN +* `#28391 `__: CI: use free-threaded build for ASAN tests +* `#28392 `__: CI: build Linux aarch64 wheels on GitHub Actions +* `#28393 `__: BUG: Fix building on s390x with clang +* `#28396 `__: BUG: Fix crackfortran parsing error when a division occurs within... 
+* `#28404 `__: MAINT: remove legacy ucsnarrow module +* `#28406 `__: BUG: Include Python.h first +* `#28407 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28408 `__: DOC: Update link to Nix in Cross Compilation +* `#28411 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28413 `__: DOC: add scimath in np.lib submodules listing +* `#28414 `__: DOC: Add missing punctuation to the random sampling page +* `#28415 `__: BLD: update cibuildwheel and build PyPy 3.11 wheels [wheel build] +* `#28421 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28426 `__: BUG: Limit the maximal number of bins for automatic histogram... +* `#28427 `__: DOC: remove mention of Poly.nickname +* `#28431 `__: MAINT: PY_VERSION_HEX simplify +* `#28436 `__: BUILD: move to manylinux_2_28 wheel builds +* `#28437 `__: DOC: fix documentation for Flag checking functions and macros +* `#28442 `__: ENH: Check for floating point exceptions in dot +* `#28444 `__: DOC: fix URL redirects +* `#28447 `__: DOC: repositioned bitwise_count under bit-wise operations +* `#28451 `__: DOC: Add -avx512_spr to disable AVX512 in build options +* `#28452 `__: TYP: stub ``random._pickle`` +* `#28453 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28455 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.3 to 2.0.4 +* `#28456 `__: MAINT: Bump actions/cache from 4.2.0 to 4.2.2 +* `#28458 `__: MAINT: Bump actions/upload-artifact from 4.5.0 to 4.6.1 +* `#28459 `__: MAINT: Bump github/codeql-action from 3.27.9 to 3.28.11 +* `#28460 `__: MAINT: Bump astral-sh/setup-uv from 5.2.1 to 5.3.1 +* `#28461 `__: MAINT: Update dependabot.yml file +* `#28462 `__: TYP: Add specializations to meshgrid stubs +* `#28464 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28465 `__: MAINT: Bump ossf/scorecard-action from 2.4.0 to 2.4.1 +* `#28466 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#28467 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.1.0 to 3.1.1 +* `#28468 `__: MAINT: Bump actions/download-artifact from 4.1.8 to 4.1.9 +* `#28473 `__: DOC: add typing badge to README +* `#28475 `__: MAINT: skip slow_pypy tests on pypy +* `#28477 `__: MAINT: fix typo in normal distribution functions docstrings +* `#28480 `__: ENH: Convert logical from C universal intrinsics to C++ using... +* `#28483 `__: DOC: only change tp_name on CPython +* `#28485 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28488 `__: fix aarch64 CI run +* `#28489 `__: MAINT: Enable building loop_logical on vector length agnostic... +* `#28491 `__: TYP: fix typing errors in ``_core.shape_base`` +* `#28492 `__: TYP: fix typing errors in ``_core.strings`` +* `#28494 `__: TYP: fix typing errors in ``_core.records`` +* `#28495 `__: DOC: let docstring mention that unique_values is now unsorted +* `#28497 `__: TYP: don't use literals in shape-types +* `#28498 `__: TYP: accept non-integer shapes in array constructor without a... 
+* `#28499 `__: TYP: remove unneseccary cast +* `#28500 `__: TYP: stub ``numpy.random._bounded_integers`` +* `#28502 `__: TYP: stub ``numpy.random._common`` +* `#28503 `__: API: Always allow ``sorted=False`` and make a note about it +* `#28505 `__: TYP: stub ``numpy._core.umath`` +* `#28506 `__: TYP: fix typing errors in ``numpy.lib._arrayterator_impl`` +* `#28507 `__: MAINT: remove ``ma.timer_comparison`` +* `#28508 `__: TYP: fix signatures of ``ndarray.put`` and ``ndarray.view`` +* `#28509 `__: TYP: annotate the missing ``ufunc.resolve_dtypes`` method +* `#28511 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28513 `__: TYP: stub ``numpy._core.overrides`` +* `#28514 `__: TYP: stub ``numpy._utils`` +* `#28515 `__: TYP: stub ``numpy._core._dtype[_ctypes]`` +* `#28517 `__: TYP: stub the remaining ``numpy._core.\*`` modules +* `#28518 `__: TYP: stub the missing submodules of ``numpy.linalg`` +* `#28519 `__: TYP: stub ``numpy._pyinstaller`` +* `#28520 `__: TYP: stub ``numpy.fft.helper`` (deprecated) +* `#28522 `__: TYP: stub ``numpy.f2py`` +* `#28523 `__: TYP: annotate the missing deprecated ``row_stack`` function +* `#28524 `__: CI, TST: Bump to cibuildwheel 2.23 (Pyodide 0.27.0) for WASM... +* `#28525 `__: TYP: fix stubtest errors in ``numpy.dtype`` and ``numpy.dtypes.\*`` +* `#28526 `__: TYP: fix stubtest errors in ``timedelta64`` and ``object_`` +* `#28527 `__: TYP: fix stubtest errors in ``numpy.lib._function_base_impl`` +* `#28528 `__: TYP: fix stubtest errors in ``numpy.lib._arraysetops_impl`` +* `#28529 `__: TYP: fix stubtest errors in ``numpy.lib._index_tricks_impl`` +* `#28530 `__: TYP: fix stubtest errors in ``numpy.lib._twodim_base_impl`` +* `#28531 `__: ENH: Add Cygwin extensions to list to copy to CWD in f2py meson... +* `#28532 `__: DOC: minor editorial change +* `#28535 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28536 `__: TYP: fix stubtest errors in ``numpy._globals`` +* `#28537 `__: TYP: fix stubtest errors in ``numpy.mat[rix]lib`` +* `#28538 `__: TYP: fix stubtest errors in ``numpy.random`` +* `#28539 `__: TYP: fix stubtest errors in ``numpy.testing`` +* `#28540 `__: TYP: fix typing errors in ``numpy.ndarray`` +* `#28541 `__: TYP: fix stubtest error in ``numpy.ma`` +* `#28546 `__: MAINT: Update main after NumPy 2.2.4 release. +* `#28547 `__: MAINT: Bump pypa/cibuildwheel from 2.23.0 to 2.23.1 +* `#28555 `__: MAINT: Bump actions/download-artifact from 4.1.9 to 4.2.0 +* `#28556 `__: NEP 54: Change status to Accepted +* `#28560 `__: MAINT: Bump actions/download-artifact from 4.2.0 to 4.2.1 +* `#28561 `__: MAINT: Bump github/codeql-action from 3.28.11 to 3.28.12 +* `#28562 `__: MAINT: Bump actions/upload-artifact from 4.6.1 to 4.6.2 +* `#28563 `__: MAINT: Bump actions/cache from 4.2.2 to 4.2.3 +* `#28568 `__: MAINT: Bump astral-sh/setup-uv from 5.3.1 to 5.4.0 +* `#28569 `__: Fixing various spelling errors +* `#28571 `__: BLD: use ``manylinux_2_28:2025.03.23-1`` [wheel build] +* `#28576 `__: API,ENH: Allow forcing an array result in ufuncs +* `#28577 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28581 `__: MAINT: Bump github/codeql-action from 3.28.12 to 3.28.13 +* `#28586 `__: MAINT: Bump pypa/cibuildwheel from 2.23.1 to 2.23.2 +* `#28587 `__: MAINT: Bump actions/setup-python from 5.4.0 to 5.5.0 +* `#28591 `__: TYP: Type masked array shape, dtype, __int__, and __float__ +* `#28593 `__: TYP: Type ``numpy.ma.min`` +* `#28600 `__: BUG: Set writeable flag for writeable dlpacks. 
+* `#28601 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.6.1... +* `#28607 `__: CI: fix cirrus config [wheel build] +* `#28611 `__: MAINT: Bump astral-sh/setup-uv from 5.4.0 to 5.4.1 +* `#28612 `__: TYP: Type ``ma.max`` and ``ma.ptp`` +* `#28615 `__: ENH: Upgrade Array API version to 2024.12 +* `#28616 `__: TYP: Type ``ma.MaskedArray.min`` +* `#28617 `__: MAINT: Bump actions/dependency-review-action from 4.5.0 to 4.6.0 +* `#28618 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28619 `__: ENH: Use openmp on x86-simd-sort to speed up np.sort and np.argsort +* `#28621 `__: DOC: Fix typo in ``numpy/typing/__init__.py`` +* `#28623 `__: TYP: Type ``ma.MaskedArray.max`` and ``ma.MaskedArray.ptp`` +* `#28624 `__: BUG: fix ``np.vectorize`` for object dtype +* `#28626 `__: DOC: update array API standard version in compatibility page +* `#28627 `__: MAINT: replace string.format() with f-strings +* `#28635 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28637 `__: TYP: Fix overload for ``ma.MaskedArray.{min,max,ptp}`` and ``ma.{min,max,ptp}`` ... +* `#28638 `__: TYP: Type ``MaskedArray.{argmin, argmax}`` and ``np.ma.{argmin,``... +* `#28643 `__: BUG: fix deepcopying StringDType arrays +* `#28644 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28645 `__: DOC: fixes classes decorated with set_module not showing its... +* `#28647 `__: DOC: Fix typos found by codespell +* `#28649 `__: ENH: Improve np.linalg.det performance +* `#28653 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28657 `__: TYP: simplified type-aliases in ``numpy._typing`` +* `#28660 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28662 `__: MAINT: Remove distutils CPU dispatcher compatibility code +* `#28664 `__: TYP: Type ``MaskedArray.sort`` +* `#28666 `__: MAINT: Bump github/codeql-action from 3.28.13 to 3.28.14 +* `#28667 `__: TYP: replace ``_ScalarType`` with ``_SCT`` +* `#28668 `__: TYP: replace ``_ArrayType`` with ``_ArrayT`` +* `#28669 `__: TYP: default to ``dtype[Any]`` +* `#28671 `__: SIMD: Fix Highway QSort symbol linking error on aarch32/ASIMD +* `#28672 `__: MAINT: Bump github/codeql-action from 3.28.14 to 3.28.15 +* `#28674 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28675 `__: TYP: fix and improve ``numpy.lib._type_check_impl`` +* `#28676 `__: TYP: fix mypy test failures +* `#28677 `__: TYP: Type ``MaskedArray.partition`` and ``MaskedArray.argpartition`` +* `#28678 `__: DEP: Deprecate ``.T`` property for non-2dim arrays and scalars +* `#28680 `__: TYP: Type ``MaskedArray.take`` and ``np.ma.take`` +* `#28684 `__: TYP: replace ``_DType`` with ``_DTypeT`` +* `#28688 `__: TYP: rename ``_ShapeType`` TypeVar to ``_ShapeT`` +* `#28689 `__: TYP: Type ``MaskedArray.__{ge,gt,le,lt}__`` +* `#28690 `__: TYP: replace ``_SCT`` with ``_ScalarT`` +* `#28693 `__: BLD: fix meson_version warning +* `#28695 `__: DOC: linalg.matrix_transpose: add alias note +* `#28699 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28702 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... 
+* `#28703 `__: MAINT: Improve float16 and float32 printing +* `#28710 `__: ENH: Improve performance for np.result_type +* `#28712 `__: MAINT: ``%i`` → ``%d`` +* `#28715 `__: TYP: Type ``np.ma.{is_masked,ndim,size,ids,iscontiguous}`` +* `#28717 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28719 `__: MAINT: switching from ``%i`` to ``fstrings`` +* `#28720 `__: TYP: drop py310 support +* `#28724 `__: STY: Apply assorted ruff rules (RUF) +* `#28725 `__: STY: Enforce ruff/pycodestyle warnings (W) +* `#28726 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28728 `__: STY: Apply assorted ruff/pyupgrade rules (UP) +* `#28731 `__: BUG: Prevent nanmax/nanmin from copying memmap arrays +* `#28733 `__: TYP: remove ``_typing._UnknownType`` and ``_ArrayLikeUnknown`` +* `#28735 `__: TYP: Type ``MaskedArray.count`` and ``np.ma.count`` +* `#28738 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28739 `__: MNT: get rid of references to Python 3.10 +* `#28740 `__: MAINT: Bump astral-sh/setup-uv from 5.4.1 to 5.4.2 +* `#28741 `__: BUG: Re-enable overriding functions in the ``np.strings`` module. +* `#28742 `__: TYP: Type ``MaskedArray.filled`` and ``np.ma.filled`` +* `#28743 `__: MNT: Enforce ruff/pygrep-hooks rules (PGH) +* `#28744 `__: STY: Apply more ruff rules (RUF) +* `#28745 `__: TYP: Type ``MaskedArray.put``\ , ``np.ma.put``\ , ``np.ma.putmask`` +* `#28746 `__: TYP: ``numpy.ma`` squiggly line cleanup +* `#28747 `__: TYP: some ``[arg]partition`` fixes +* `#28748 `__: ENH: Support Python 3.14 +* `#28750 `__: TYP: fix ``count_nonzero`` signature +* `#28751 `__: MNT: discard Python 2 leftover +* `#28752 `__: MNT: Apply ruff/Pylint rule PLW0129 (assertions that never fail) +* `#28754 `__: MNT: Enforce ruff/Pylint Error rules (PLE) +* `#28755 `__: MNT: Apply assorted ruff/Pylint Refactor rules (PLR) +* `#28756 `__: MNT: Apply assorted ruff/Pylint Warning rules (PLW) +* `#28757 `__: BUG: Fix AVX512_SPR dispatching for SVML half-precision operations +* `#28760 `__: STY: Apply ruff/pyupgrade rule UP032 +* `#28763 `__: STY: Use f-string instead of ``format`` call +* `#28764 `__: MNT: Enforce ruff rules: Flynt (FLY) and flake8-pie (PIE) +* `#28765 `__: MNT: Enforce ruff/flake8-bugbear rules (B) +* `#28766 `__: TYP: Type ``MaskedArray.compressed`` and ``np.ma.compressed`` +* `#28768 `__: MAINT: getting rid of old ``%`` and ``.format(...)`` strings... 
+* `#28769 `__: ENH: Improve Floating Point Cast Performance on ARM +* `#28770 `__: MNT: Enforce ruff/pyupgrade rules (UP) +* `#28771 `__: ENH: Include offset in error message when fallocate() fails +* `#28775 `__: STY: Partially apply ruff/pycodestyle rules (E) +* `#28779 `__: MAINT: Update main after Numpy 2.2.5 release +* `#28789 `__: BUG: Re-enable GCC function-specific optimization attributes +* `#28793 `__: TYP: Type ``np.ma.allclose`` and ``np.ma.allequal`` +* `#28798 `__: TST: skip test if spawning threads triggers a RuntimeError +* `#28803 `__: MAINT: Bump github/codeql-action from 3.28.15 to 3.28.16 +* `#28804 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28806 `__: BUG: Fix `` __array__(None)`` to preserve dtype +* `#28807 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28808 `__: CI: Make clang_TSAN CI job use cpython_sanity docker image +* `#28809 `__: TYP: write ``dtype[Any]`` as ``dtype`` +* `#28810 `__: TYP: replace ``_Self`` type parameters with ``typing.Self`` +* `#28811 `__: TYP: remove unnecessary scalar-type ``Any`` type-args +* `#28816 `__: MAINT: Bump actions/setup-python from 5.5.0 to 5.6.0 +* `#28817 `__: MAINT: Bump astral-sh/setup-uv from 5.4.2 to 6.0.0 +* `#28818 `__: MAINT: Bump actions/download-artifact from 4.2.1 to 4.3.0 +* `#28819 `__: TYP: simplify redundant unions of builtin scalar types +* `#28820 `__: TYP: ``None`` at the end of a union +* `#28821 `__: BUG: Use unrotated companion matrix in polynomial.polyroots. +* `#28831 `__: TYP: Fix type annotations for ``np.ma.nomask`` and ``np.ma.MaskType`` +* `#28832 `__: TYP: Type ``np.ma.getmask`` +* `#28833 `__: TYP: Type ``np.ma.is_mask`` +* `#28836 `__: ENH: Provide Windows 11 ARM64 wheels (#22530) +* `#28841 `__: BUG: Fix Clang warning in loops_half.dispatch.c.src +* `#28845 `__: TYP: Type ``MaskedArray.nonzero`` +* `#28847 `__: TYP: Use _Array1D alias in ``numpy.ma.core.pyi`` +* `#28848 `__: TYP: Type ``MaskedArray.ravel`` +* `#28849 `__: TYP: Type ``MaskedArray.repeat``\ , improve overloads for ``NDArray.repeat``\... +* `#28850 `__: TYP: Type ``MaskedArray.swapaxes`` +* `#28854 `__: MAINT: Bump pypa/cibuildwheel from 2.23.2 to 2.23.3 +* `#28855 `__: TYP: add missing ``mod`` params to ``__[r]pow__`` +* `#28856 `__: TYP: generic ``StringDType`` +* `#28857 `__: TYP: implicit ``linalg`` private submodule re-exports +* `#28858 `__: TYP: fix the ``set_module`` signature +* `#28859 `__: DOC: Replace http:// with https:// +* `#28860 `__: BLD: update vendored Meson: v1.6.1 and iOS support +* `#28862 `__: BUG: fix stringdtype singleton thread safety +* `#28863 `__: TYP: Improve consistency of (masked) array typing aliases +* `#28867 `__: TYP: Type ``MaskedArray.{__setmask__,mask,harden_mask,soften_mask,hardmask,unsha``... 
+* `#28868 `__: TYP: Type ``MaskedArray.{imag, real, baseclass, mT}`` +* `#28869 `__: MAINT: Bump astral-sh/setup-uv from 6.0.0 to 6.0.1 +* `#28870 `__: MNT: retire old script for SVN repositories +* `#28871 `__: MNT: retire script superseded by ruff rule W605 +* `#28872 `__: DOC: consistent and updated LICENSE files for wheels +* `#28874 `__: DOC: ``numpy.i`` will not be included as part of SWIG +* `#28876 `__: MNT: discard unused function using os.system() +* `#28877 `__: DOC: update content of cross compilation build docs +* `#28878 `__: STY: Enforce more ruff rules +* `#28879 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28880 `__: TYP: Type ``MaskedArray.all`` and ``MaskedArray.any`` +* `#28882 `__: MAINT: address warning in SWIG tests +* `#28883 `__: MAINT: from_dlpack thread safety fixes +* `#28884 `__: DEP: deprecate ``numpy.typing.NBitBase`` +* `#28887 `__: MAINT: Bump github/codeql-action from 3.28.16 to 3.28.17 +* `#28888 `__: DOC: math mode x to \times in docstring for numpy.linalg.multi_dot +* `#28892 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28893 `__: TYP: remove non-existent extended-precision scalar types +* `#28898 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28904 `__: BLD: update vendored Meson to include iOS fix +* `#28905 `__: TYP: Test ``MaskedArray.transpose`` and ``MaskedArray.T``\ ,... +* `#28906 `__: TYP: np.argmin and np.argmax overload changes +* `#28908 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28912 `__: TYP: add ``float64`` overloads to ``{lin,log,geom}space`` +* `#28918 `__: DOC: Fixes absent line numbers on link to classes decorated with... +* `#28923 `__: BUG: Use string conversion defined on dtype for .str +* `#28927 `__: MAINT: Remove outdated ``MaskedArray.__div__`` and ``MaskedArray.__idiv__`` +* `#28928 `__: MNT: add support for 3.14.0b1 +* `#28929 `__: MAINT: remove py2 ``__div__`` methods from ``poly1d`` and ``ABCPolyBase`` +* `#28930 `__: MAINT: remove py2 ``__div__`` remnants from the tests +* `#28931 `__: MAINT: remove py2 ``__div__`` methods from ``lib.user_array.container`` +* `#28932 `__: MAINT: remove references to 256-bits extended precision types +* `#28933 `__: MAINT: Use consistent naming for ``numpy/typing/tests/data/fail/ma.pyi`` +* `#28934 `__: TYP, TST: improved type-testing +* `#28935 `__: MAINT: Enable ruff E251 +* `#28936 `__: TST: Prevent import error when tests are not included in the... 
+* `#28937 `__: CI: fix TSAN CI by using a different docker image +* `#28938 `__: MNT: clean up free-threaded CI configuration +* `#28939 `__: MAINT: Bump actions/dependency-review-action from 4.6.0 to 4.7.0 +* `#28940 `__: TYP: optional type parameters for ``ndarray`` and ``flatiter`` +* `#28941 `__: DOC: Fix titles in ``development_ghcodespaces.rst`` +* `#28945 `__: MAINT: Enable linting with ruff E501 +* `#28952 `__: MAINT: Bump actions/dependency-review-action from 4.7.0 to 4.7.1 +* `#28954 `__: MAINT: Enable linting with ruff E501 for numpy._core +* `#28956 `__: DOC: Remove references to Python 2/3 +* `#28958 `__: TYP: reject complex scalar types in ``ndarray.__ifloordiv__`` +* `#28959 `__: TYP: remove redundant ``ndarray`` inplace operator overloads +* `#28960 `__: TYP: fix mypy & pyright errors in ``np.matrix`` +* `#28961 `__: DEP: finalize removal of ``numpy.compat`` +* `#28962 `__: TYP: type-testing without the mypy plugin +* `#28963 `__: MAINT: Update ruff to 0.11.9 in linting requirements +* `#28969 `__: MNT: Enforce ruff/isort rules (I) +* `#28971 `__: MAINT: Enable linting with ruff E501 +* `#28972 `__: MNT: Get rif of ``# pylint: `` pragma controls +* `#28974 `__: MNT: Get rid of ``version: $Id`` CVS tags +* `#28975 `__: MNT: import numpy as np +* `#28976 `__: MNT: Get rid of Pyflakes / flake8 +* `#28977 `__: MNT: Enforce ruff/flake8-implicit-str-concat rules (ISC) +* `#28978 `__: MNT: Enforce ruff/pandas-vet rules (PD) +* `#28981 `__: STY: reformat the ``_typing`` imports without trailing commas +* `#28982 `__: TYP: Gradual shape type defaults +* `#28984 `__: MNT: Use isinstance() instead of comparing type() +* `#28986 `__: TYP: Type ``MaskedArray.__{iadd,isub,imul,itruediv,ifloordiv,ipow}__`` +* `#28987 `__: MNT: Align ruff pin between ``requirements/linter_requirements.txt``... +* `#28988 `__: TYP: add missing ``ndarray.__{add,mul}__`` ``character`` type... +* `#28989 `__: MAINT: Bump github/codeql-action from 3.28.17 to 3.28.18 +* `#28990 `__: Revert "DEP: Deprecate ``.T`` property for non-2dim arrays and... +* `#28993 `__: MAINT: update NPY_FEATURE_VERSION after dropping python 3.10 +* `#28994 `__: TYP: allow inplace division of ``NDArray[timedelta64]`` by floats +* `#28995 `__: TYP: remove ``from __future__ import annotations`` +* `#28998 `__: MAINT: Update main after 2.2.6 release. +* `#29002 `__: MAINT: Update download-wheels for multiple pages +* `#29006 `__: ENH: Disable the alloc cache under address and memory sanitizers +* `#29008 `__: MNT: fix CI issues on main +* `#29018 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#29033 `__: BUG: Fix workflow error +* `#29042 `__: MNT: constant string arrays instead of pointers in C +* `#29043 `__: BUG: Avoid compile errors in f2py modules +* `#29044 `__: BUG: Fix f2py derived types in modules +* `#29046 `__: BUG: Fix cache use regression +* `#29047 `__: REL: Prepare for the NumPy 2.3.0rc1 release [wheel build] +* `#29070 `__: TYP: Various typing fixes. +* `#29072 `__: MAINT: use pypy 3.11 nightly which has a fix for ctypeslib +* `#29073 `__: BLD: use sonoma image on Cirrus for wheel build +* `#29074 `__: BUG: add bounds-checking to in-place string multiply +* `#29082 `__: BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 [wheel... +* `#29089 `__: MNT: Avoid use of deprecated _PyDict_GetItemStringWithError in... 
+* `#29099 `__: BUG: f2py: thread-safe forcomb (#29091) +* `#29100 `__: TYP: fix NDArray[integer] inplace operator mypy issue +* `#29101 `__: PERF: Make NpzFile member existence constant time +* `#29116 `__: MAINT: Update to vs2022 in NumPy 2.3.x [wheel build] +* `#29118 `__: MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs +* `#29132 `__: MAINT: Fix for segfaults with GCC 15 + diff --git a/doc/changelog/2.3.1-changelog.rst b/doc/changelog/2.3.1-changelog.rst new file mode 100644 index 000000000000..a1c840f8beda --- /dev/null +++ b/doc/changelog/2.3.1-changelog.rst @@ -0,0 +1,34 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Brad Smith + +* Charles Harris +* Developer-Ecosystem-Engineering +* François Rozet +* Joren Hammudoglu +* Matti Picus +* Mugundan Selvanayagam +* Nathan Goldbaum +* Sebastian Berg + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#29140 `__: MAINT: Prepare 2.3.x for further development +* `#29191 `__: BUG: fix matmul with transposed out arg (#29179) +* `#29192 `__: TYP: Backport typing fixes and improvements. +* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196) +* `#29222 `__: TYP: Backport typing fixes +* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation... +* `#29234 `__: TST: Fix test that uses unininitialized memory (#29232) +* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223) +* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231) +* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. diff --git a/doc/changelog/2.3.2-changelog.rst b/doc/changelog/2.3.2-changelog.rst new file mode 100644 index 000000000000..5c893a510ae7 --- /dev/null +++ b/doc/changelog/2.3.2-changelog.rst @@ -0,0 +1,38 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Joren Hammudoglu +* Maanas Arora +* Marco Edward Gorelli +* Matti Picus +* Nathan Goldbaum +* Sebastian Berg +* kostayScr + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29256 `__: MAINT: Prepare 2.3.x for further development +* `#29283 `__: TYP: Work around a mypy issue with bool arrays (#29248) +* `#29284 `__: BUG: fix fencepost error in StringDType internals +* `#29287 `__: BUG: handle case in mapiter where descriptors might get replaced... +* `#29350 `__: BUG: Fix shape error path in array-interface +* `#29412 `__: BUG: Allow reading non-npy files in npz and add test +* `#29413 `__: TST: Avoid uninitialized values in test (#29341) +* `#29414 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29415 `__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) +* `#29416 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... +* `#29417 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392) +* `#29424 `__: MAINT: use a stable pypy release in CI +* `#29425 `__: MAINT: Support python 314rc1 +* `#29429 `__: MAINT: Update highway to match main. +* `#29430 `__: BLD: use github to build macos-arm64 wheels with OpenBLAS and... 
+* `#29437 `__: BUG: fix datetime/timedelta hash memory leak (#29411) diff --git a/doc/changelog/2.3.3-changelog.rst b/doc/changelog/2.3.3-changelog.rst new file mode 100644 index 000000000000..0398b30072af --- /dev/null +++ b/doc/changelog/2.3.3-changelog.rst @@ -0,0 +1,50 @@ + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aleksandr A. Voyt + +* Bernard Roesler + +* Charles Harris +* Hunter Hogan + +* Joren Hammudoglu +* Maanas Arora +* Matti Picus +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sanjay Kumar Sakamuri Kamalakar + +* Tobias Markus + +* Warren Weckesser +* Zebreus + + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. + +* `#29440 `__: MAINT: Prepare 2.3.x for further development. +* `#29446 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29447 `__: BLD: allow targeting webassembly without emscripten +* `#29460 `__: MAINT: Backport write_release.py +* `#29473 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29500 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) +* `#29501 `__: MAINT: Add .file entry to all .s SVML files +* `#29556 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29562 `__: BLD: update vendored Meson to 1.8.3 [wheel build] +* `#29563 `__: BUG: Fix metadata not roundtripping when pickling datetime (#29555) +* `#29587 `__: TST: update link and version for Intel SDE download +* `#29593 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29672 `__: MAINT: Update pythoncapi-compat from main. +* `#29673 `__: MAINT: Update cibuildwheel. +* `#29674 `__: MAINT: Fix typo in wheels.yml +* `#29683 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29684 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29685 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29687 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29689 `__: BLD: Add missing include +* `#29691 `__: BUG: use correct input dtype in flatiter assignment +* `#29700 `__: TYP: fix np.bool method declarations +* `#29701 `__: BUG: Correct ambiguous logic for s390x CPU feature detection + diff --git a/doc/changelog/2.3.4-changelog.rst b/doc/changelog/2.3.4-changelog.rst new file mode 100644 index 000000000000..f94b46a07573 --- /dev/null +++ b/doc/changelog/2.3.4-changelog.rst @@ -0,0 +1,61 @@ + +Contributors +============ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Christian Barbia + +* Evgeni Burovski +* Joren Hammudoglu +* Maaz + +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Riku Sakamoto + +* Sandeep Gupta + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Warren Weckesser +* dependabot[bot] + +Pull requests merged +==================== + +A total of 30 pull requests were merged for this release.
+ +* `#29725 `__: MAINT: Prepare 2.3.x for further development +* `#29781 `__: MAINT: Pin some upstream dependencies +* `#29782 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29783 `__: BUG: Include python-including headers first (#29281) +* `#29784 `__: TYP: fix np.number and np.\*integer method declaration +* `#29785 `__: TYP: mypy 1.18.1 +* `#29788 `__: TYP: replace scalar type __init__ with __new__ +* `#29790 `__: BUG: Fix ``dtype`` refcount in ``__array__`` (#29715) +* `#29791 `__: TYP: fix method declarations in floating, timedelta64, and datetime64 +* `#29792 `__: MAINT: delete unused variables in unary logical dispatch +* `#29797 `__: BUG: Fix pocketfft umath strides for AIX compatibility (#29768) +* `#29798 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29799 `__: BUG: Fix assert in nditer buffer setup +* `#29800 `__: BUG: Stable ScalarType ordering +* `#29838 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29839 `__: BUG: linalg: emit a MemoryError on a malloc failure (#29811) +* `#29840 `__: BLD: change file extension for libnpymath on win-arm64 from .a... +* `#29864 `__: CI: Fix loongarch64 CI (#29856) +* `#29865 `__: TYP: Various typing fixes +* `#29910 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC (#29908) +* `#29911 `__: TYP: add missing ``__slots__`` (#29901) +* `#29913 `__: TYP: wrong argument defaults in ``testing._private`` (#29902) +* `#29920 `__: BUG: avoid segmentation fault in string_expandtabs_length_promoter +* `#29921 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types... +* `#29922 `__: TYP: minor fixes related to ``errstate`` (#29914) +* `#29923 `__: TST: use requirements/test_requirements across CI (#29919) +* `#29926 `__: BUG: fix negative samples generated by Wald distribution (#29609) +* `#29940 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.1 +* `#29949 `__: STY: rename @classmethod arg to cls +* `#29950 `__: MAINT: Simplify string arena growth strategy (#29885) + diff --git a/doc/changelog/2.3.5-changelog.rst b/doc/changelog/2.3.5-changelog.rst new file mode 100644 index 000000000000..123e1e9d0453 --- /dev/null +++ b/doc/changelog/2.3.5-changelog.rst @@ -0,0 +1,40 @@ + +Contributors +============ + +A total of 10 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Kollasch + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Rafael Laboissière + +* Sayed Awad +* Sebastian Berg +* Warren Weckesser +* Yasir Ashfaq + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29979 `__: MAINT: Prepare 2.3.x for further development +* `#30026 `__: SIMD, BLD: Backport FPMATH mode on x86-32 and filter successor... +* `#30029 `__: MAINT: Backport write_release.py +* `#30041 `__: TYP: Various typing updates +* `#30059 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len... +* `#30063 `__: BUG: Fix np.strings.slice if start > stop +* `#30076 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation (#30071) +* `#30090 `__: BUG: Fix resize when it contains references (#29970) +* `#30129 `__: BLD: update scipy-openblas, use -Dpkg_config_path (#30049) +* `#30130 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30157 `__: BLD: use scipy-openblas 0.3.30.7 (#30132) +* `#30158 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray``...
+* `#30185 `__: BUG: Fix check of PyMem_Calloc return value. (#30176) +* `#30217 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30218 `__: BUG: Fix build on s390x with clang (#30214) +* `#30237 `__: ENH: Make FPE blas check a runtime check for all apple arm systems + diff --git a/doc/neps/conf.py b/doc/neps/conf.py index 33faaf17ff64..056002135dbd 100644 --- a/doc/neps/conf.py +++ b/doc/neps/conf.py @@ -39,10 +39,10 @@ templates_path = ['../source/_templates/'] # The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: +# You can specify multiple suffixes as a dict mapping suffixes to parsers: # -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +# source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'} +source_suffix = {'.rst': 'restructuredtext'} # The master toctree document. master_doc = 'content' diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst index badd41875af2..11ef238d6179 100644 --- a/doc/neps/nep-0021-advanced-indexing.rst +++ b/doc/neps/nep-0021-advanced-indexing.rst @@ -123,7 +123,7 @@ with shape ``(1,)``, not a 2D sub-matrix with shape ``(1, 1)``. Mixed indexing seems so tricky that it is tempting to say that it never should be used. However, it is not easy to avoid, because NumPy implicitly adds full slices if there are fewer indices than the full dimensionality of the indexed -array. This means that indexing a 2D array like `x[[0, 1]]`` is equivalent to +array. This means that indexing a 2D array like ``x[[0, 1]]`` is equivalent to ``x[[0, 1], :]``. These cases are not surprising, but they constrain the behavior of mixed indexing. @@ -236,7 +236,7 @@ be deduced: For the beginning, this probably means cases where ``arr[ind]`` and ``arr.oindex[ind]`` return different results give deprecation warnings. This includes every use of vectorized indexing with multiple integer arrays. - Due to the transposing behaviour, this means that``arr[0, :, index_arr]`` + Due to the transposing behaviour, this means that ``arr[0, :, index_arr]`` will be deprecated, but ``arr[:, 0, index_arr]`` will not for the time being. 7. To ensure that existing subclasses of `ndarray` that override indexing @@ -285,7 +285,7 @@ Open Questions Copying always "fixes" this possible inconsistency. * The final state to morph plain indexing in is not fixed in this PEP. - It is for example possible that `arr[index]`` will be equivalent to + It is for example possible that ``arr[index]`` will be equivalent to ``arr.oindex`` at some point in the future. Since such a change will take years, it seems unnecessary to make specific decisions at this time. diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index 7fb8c9734900..4a3d268697a2 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -102,14 +102,14 @@ a complete implementation would look like the following: The implementation above exemplifies the simplest case, but the overall idea is that libraries will implement a ``__duckarray__`` method that returns the original object, and an ``__array__`` method that either creates and returns an -appropriate NumPy array, or raises a``TypeError`` to prevent unintentional use +appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary object that does not implement ``__array__``, it will create a NumPy array scalar).
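To make the protocol just described concrete, here is a minimal, hedged sketch (the class name and internals are hypothetical illustrations, not the NEP's reference implementation):

.. code-block:: python

    import numpy as np

    class MinimalDuckArray:
        # Hypothetical duck array that wraps a plain ndarray.
        def __init__(self, data):
            self._data = np.asarray(data)

        def __duckarray__(self):
            # Return the object itself: "I already behave like an ndarray".
            return self

        def __array__(self, dtype=None):
            # Refuse silent coercion, as the paragraph above recommends.
            raise TypeError("MinimalDuckArray does not support np.asarray")

Under the NEP's proposal, ``np.duckarray(MinimalDuckArray([1, 2]))`` would return the object unchanged, while calling ``np.asarray`` on it would raise.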
In case of existing libraries that don't already implement ``__array__`` but would like to use duck array typing, it is advised that they introduce -both ``__array__`` and``__duckarray__`` methods. +both ``__array__`` and ``__duckarray__`` methods. Usage ----- diff --git a/doc/neps/nep-0038-SIMD-optimizations.rst b/doc/neps/nep-0038-SIMD-optimizations.rst index eb1157342948..445c008a76c3 100644 --- a/doc/neps/nep-0038-SIMD-optimizations.rst +++ b/doc/neps/nep-0038-SIMD-optimizations.rst @@ -8,7 +8,7 @@ NEP 38 — Using SIMD optimization instructions for performance :Status: Final :Type: Standards :Created: 2019-11-25 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB/#PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB +:Resolution: `NumPy Discussion `_ Abstract diff --git a/doc/neps/nep-0049.rst b/doc/neps/nep-0049-data-allocation-strategies.rst similarity index 98% rename from doc/neps/nep-0049.rst rename to doc/neps/nep-0049-data-allocation-strategies.rst index 180cfea17156..ec18f7a315d9 100644 --- a/doc/neps/nep-0049.rst +++ b/doc/neps/nep-0049-data-allocation-strategies.rst @@ -8,8 +8,7 @@ NEP 49 — Data allocation strategies :Status: Final :Type: Standards Track :Created: 2021-04-18 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/YZ3PNTXZUT27B6ITFAD3WRSM3T3SRVK4/#PKYXCTG4R5Q6LIRZC4SEWLNBM6GLRF26 - +:Resolution: `NumPy Discussion `_ Abstract -------- diff --git a/doc/neps/nep-0050-scalar-promotion.rst b/doc/neps/nep-0050-scalar-promotion.rst index aa04dd2c740e..974f6691d363 100644 --- a/doc/neps/nep-0050-scalar-promotion.rst +++ b/doc/neps/nep-0050-scalar-promotion.rst @@ -509,9 +509,9 @@ will be ignored. This means, that operations will never silently use the The user will have to write one of:: np.array([3]) + np.array(2**100) - np.array([3]) + np.array(2**100, dtype=object) + np.array([3]) + np.array(2**100, dtype=np.object_) -As such implicit conversion to ``object`` should be rare and the work-around +As such implicit conversion to ``object_`` should be rare and the work-around is clear, we expect that the backwards compatibility concerns are fairly small. diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index 7e29e1425e8c..555052fa16f7 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -224,7 +224,7 @@ to fixed-width unicode arrays:: In [3]: data = [str(i) * 10 for i in range(100_000)] - In [4]: %timeit arr_object = np.array(data, dtype=object) + In [4]: %timeit arr_object = np.array(data, dtype=np.object_) 3.15 ms ± 74.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) In [5]: %timeit arr_stringdtype = np.array(data, dtype=StringDType()) @@ -242,7 +242,7 @@ for strings, the string loading performance of ``StringDType`` should improve. String operations have similar performance:: - In [7]: %timeit np.array([s.capitalize() for s in data], dtype=object) + In [7]: %timeit np.array([s.capitalize() for s in data], dtype=np.object_) 31.6 ms ± 728 µs per loop (mean ± std. dev.
of 7 runs, 10 loops each) In [8]: %timeit np.char.capitalize(arr_stringdtype) diff --git a/doc/neps/scope.rst b/doc/neps/scope.rst index 93887c4b12ff..ffa3d8655ad8 100644 --- a/doc/neps/scope.rst +++ b/doc/neps/scope.rst @@ -36,10 +36,10 @@ Here, we describe aspects of N-d array computation that are within scope for Num - NumPy provides some **infrastructure for other packages in the scientific Python ecosystem**: - - numpy.distutils (build support for C++, Fortran, BLAS/LAPACK, and other - relevant libraries for scientific computing) + - numpy.distutils (removed in NumPy 2.5.0; previously provided build support for C++, Fortran, + BLAS/LAPACK, and other relevant libraries for scientific computing) - f2py (generating bindings for Fortran code) - - testing utilities + - testing utilities (mostly deprecated, pytest does a good job) - **Speed**: we take performance concerns seriously and aim to execute operations on large arrays with similar performance as native C diff --git a/doc/preprocess.py b/doc/preprocess.py index b2e64ab6393a..bc43e89764f8 100755 --- a/doc/preprocess.py +++ b/doc/preprocess.py @@ -4,7 +4,7 @@ def main(): - doxy_gen(os.path.abspath(os.path.join('..'))) + doxy_gen(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) def doxy_gen(root_path): """ @@ -24,6 +24,7 @@ def doxy_gen(root_path): class DoxyTpl(Template): delimiter = '@' + def doxy_config(root_path): """ Fetch all Doxygen sub-config files and gather it with the main config file. @@ -35,13 +36,14 @@ def doxy_config(root_path): conf = DoxyTpl(fd.read()) confs.append(conf.substitute(CUR_DIR=dsrc_path, **sub)) - for dpath, _, files in os.walk(root_path): - if ".doxyfile" not in files: - continue - conf_path = os.path.join(dpath, ".doxyfile") - with open(conf_path) as fd: - conf = DoxyTpl(fd.read()) - confs.append(conf.substitute(CUR_DIR=dpath, **sub)) + for subdir in ["doc", "numpy"]: + for dpath, _, files in os.walk(os.path.join(root_path, subdir)): + if ".doxyfile" not in files: + continue + conf_path = os.path.join(dpath, ".doxyfile") + with open(conf_path) as fd: + conf = DoxyTpl(fd.read()) + confs.append(conf.substitute(CUR_DIR=dpath, **sub)) return confs diff --git a/doc/release/upcoming_changes/26018.change.rst b/doc/release/upcoming_changes/26018.change.rst deleted file mode 100644 index 9d7c139be183..000000000000 --- a/doc/release/upcoming_changes/26018.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -``unique_values`` may return unsorted data ------------------------------------------- -The relatively new function (added in NumPy 2.0) ``unique_values`` may now -return unsorted results. Just as ``unique_counts`` and ``unique_all`` -these never guaranteed a sorted result, however, the result -was sorted until now. In cases where these do return a sorted result, this -may change in future releases to improve performance. diff --git a/doc/release/upcoming_changes/26018.performance.rst b/doc/release/upcoming_changes/26018.performance.rst deleted file mode 100644 index ffeab51dbdf6..000000000000 --- a/doc/release/upcoming_changes/26018.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Performance improvements to ``np.unique`` ----------------------------------------- -``np.unique`` now tries to use a hash table to find unique values instead of sorting -values before finding unique values. This is limited to certain dtypes for now, and -the function is now faster for those dtypes.
The function now also exposes a ``sorted`` -parameter to allow returning unique values as they were found, instead of sorting them -afterwards. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26745.highlight.rst b/doc/release/upcoming_changes/26745.highlight.rst deleted file mode 100644 index 5636f919c80d..000000000000 --- a/doc/release/upcoming_changes/26745.highlight.rst +++ /dev/null @@ -1,10 +0,0 @@ -Interactive examples in the NumPy documentation ------------------------------------------------ - -The NumPy documentation includes a number of examples that -can now be run interactively in your browser using WebAssembly -and Pyodide. - -Please note that the examples are currently experimental in -nature and may not work as expected for all methods in the -public API. diff --git a/doc/release/upcoming_changes/27288.improvement.rst b/doc/release/upcoming_changes/27288.improvement.rst deleted file mode 100644 index c7319554c63f..000000000000 --- a/doc/release/upcoming_changes/27288.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Scalar comparisons between non-comparable dtypes such as - `np.array(1) == np.array('s')` now return a NumPy bool instead of - a Python bool. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27789.new_function.rst b/doc/release/upcoming_changes/27789.new_function.rst deleted file mode 100644 index 734a0c3bc2b5..000000000000 --- a/doc/release/upcoming_changes/27789.new_function.rst +++ /dev/null @@ -1,5 +0,0 @@ -New function `numpy.strings.slice` ----------------------------------- -The new function `numpy.strings.slice` was added, which implements fast -native slicing of string arrays. It supports the full slicing API including -negative slice offsets and steps. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.c_api.rst b/doc/release/upcoming_changes/27883.c_api.rst deleted file mode 100644 index 107e0036c5c2..000000000000 --- a/doc/release/upcoming_changes/27883.c_api.rst +++ /dev/null @@ -1,4 +0,0 @@ -* `NpyIter_GetTransferFlags` is now available to check if - the iterator needs the Python API or if casts may cause floating point - errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` - to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). \ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.change.rst b/doc/release/upcoming_changes/27883.change.rst deleted file mode 100644 index ea68771efba3..000000000000 --- a/doc/release/upcoming_changes/27883.change.rst +++ /dev/null @@ -1,17 +0,0 @@ -Changes to the main iterator and potential numerical changes ------------------------------------------------------------- -The main iterator, used in math functions and via ``np.nditer`` from Python -and ``NpyIter`` in C, now behaves differently for some buffered iterations. -This means that: - -* The buffer size used will often be smaller than the maximum buffer sized - allowed by the ``buffersize`` parameter. -* The "growinner" flag is now honored with buffered reductions when no operand - requires buffering. - -For ``np.sum()`` such changes in buffersize may slightly change numerical -results of floating point operations. -Users who use "growinner" for custom reductions could notice -changes in precision (for example, in NumPy we removed it from -``einsum`` to avoid most precision changes and improve precision -for some 64bit floating point inputs). 
diff --git a/doc/release/upcoming_changes/27998.c_api.rst b/doc/release/upcoming_changes/27998.c_api.rst deleted file mode 100644 index edc6371af1f9..000000000000 --- a/doc/release/upcoming_changes/27998.c_api.rst +++ /dev/null @@ -1,10 +0,0 @@ -New `NpyIter_GetTransferFlags` and ``NpyIter_IterationNeedsAPI`` change ------------------------------------------------------------------------ -NumPy now has the new `NpyIter_GetTransferFlags` function as a more precise -way checking of iterator/buffering needs. I.e. whether the Python API/GIL is -required or floating point errors may occur. -This function is also faster if you already know your needs without buffering. - -The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were -previously performed at setup time. While it was never necessary to call it -multiple times, doing so will now have a larger cost. diff --git a/doc/release/upcoming_changes/28080.c_api.rst b/doc/release/upcoming_changes/28080.c_api.rst deleted file mode 100644 index f72be7ef52fe..000000000000 --- a/doc/release/upcoming_changes/28080.c_api.rst +++ /dev/null @@ -1 +0,0 @@ -* ``NpyIter`` now has no limit on the number of operands it supports. diff --git a/doc/release/upcoming_changes/28080.improvement.rst b/doc/release/upcoming_changes/28080.improvement.rst deleted file mode 100644 index 19b85ae3c96a..000000000000 --- a/doc/release/upcoming_changes/28080.improvement.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``np.nditer`` now has no limit on the number of supported operands - (C-integer). diff --git a/doc/release/upcoming_changes/28102.change.rst b/doc/release/upcoming_changes/28102.change.rst deleted file mode 100644 index bd54378a652e..000000000000 --- a/doc/release/upcoming_changes/28102.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -The minimum supported GCC version is now 9.3.0 ----------------------------------------------- -The minimum supported version was updated from 8.4.0 to 9.3.0, -primarily in order to reduce the chance of platform-specific bugs in old GCC -versions from causing issues. - diff --git a/doc/release/upcoming_changes/28105.improvement.rst b/doc/release/upcoming_changes/28105.improvement.rst deleted file mode 100644 index 537467575234..000000000000 --- a/doc/release/upcoming_changes/28105.improvement.rst +++ /dev/null @@ -1,2 +0,0 @@ -* No-copy pickling is now supported for any - array that can be transposed to a C-contiguous array. \ No newline at end of file diff --git a/doc/release/upcoming_changes/28129.deprecation.rst b/doc/release/upcoming_changes/28129.deprecation.rst deleted file mode 100644 index b1beb0c5cca3..000000000000 --- a/doc/release/upcoming_changes/28129.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic - static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` - section of your mypy configuration. If this change results in new errors being - reported, kindly open an issue. 
diff --git a/doc/release/upcoming_changes/28205.improvement.rst b/doc/release/upcoming_changes/28205.improvement.rst deleted file mode 100644 index 42eaaac98363..000000000000 --- a/doc/release/upcoming_changes/28205.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Added warnings to `np.isclose` ---------------------------------- -Added warning messages if at least one of atol or rtol are -either `np.nan` or `np.inf` within `np.isclose` - -* Warnings follow the user's `np.seterr` settings diff --git a/doc/release/upcoming_changes/28214.new_feature.rst b/doc/release/upcoming_changes/28214.new_feature.rst deleted file mode 100644 index eb95a0739e79..000000000000 --- a/doc/release/upcoming_changes/28214.new_feature.rst +++ /dev/null @@ -1,23 +0,0 @@ -NumPy now registers its pkg-config paths with the pkgconf_ PyPI package ------------------------------------------------------------------------ - -The pkgconf_ PyPI package provides an interface for projects like NumPy to -register their own paths to be added to the pkg-config search path. This means -that when using pkgconf_ from PyPI, NumPy will be discoverable without needing -for any custom environment configuration. - -.. attention:: Attention - - This only applies when using the pkgconf_ package from PyPI_, or put another - way, this only applies when installing pkgconf_ via a Python package - manager. - - If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or - any other source that does not use the pkgconf-pypi_ project, the NumPy - pkg-config directory will not be automatically added to the search path. In - these situations, you might want to use ``numpy-config``. - - -.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi -.. _PyPI: https://pypi.org/ -.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi diff --git a/doc/release/upcoming_changes/28250.improvement.rst b/doc/release/upcoming_changes/28250.improvement.rst deleted file mode 100644 index 703a8bb0c2e1..000000000000 --- a/doc/release/upcoming_changes/28250.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the - custom dtype over a more generic name constructed from its ``kind`` and - ``itemsize``. 
diff --git a/doc/release/upcoming_changes/28254.expired.rst b/doc/release/upcoming_changes/28254.expired.rst deleted file mode 100644 index 5f391eb6cbe2..000000000000 --- a/doc/release/upcoming_changes/28254.expired.rst +++ /dev/null @@ -1,29 +0,0 @@ -* Remove deprecated macros like ``NPY_OWNDATA`` from cython interfaces in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove alias ``generate_divbyzero_error`` to ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to ``npy_set_floatstatus_overflow`` (deprecated since 1.10) -* Remove ``np.tostring`` (deprecated since 1.19) -* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) -* Raise when using ``np.bincount(...minlength=None)``, use 0 instead (deprecated since 1.14) -* Passing ``shape=None`` to functions with a non-optional shape argument errors, use ``()`` instead (deprecated since 1.20) -* Inexact matches for ``mode`` and ``searchside`` raise (deprecated since 1.20) -* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) -* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they would guess (deprecated since 1.18) -* ``datetime64`` and ``timedelta64`` construction with a tuple no longer accepts an ``event`` value, either use a two-tuple of (unit, num) or a 4-tuple of (unit, num, den, 1) (deprecated since 1.14) -* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that attribute must be a dtype-instance rather than a thing that can be parsed as a dtype instance (deprecated in 1.19). At some point the whole construct of using a dtype attribute will be deprecated (see #25306) -* Passing booleans as partition index errors (deprecated since 1.23) -* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) -* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) -* Disallow make a non-writeable array writeable for arrays with a base that do not own their data (deprecated since 1.17) -* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, not ``unsafe`` (deprecated since 1.20) -* Unpickling a scalar with object dtype errors (deprecated since 1.20) -* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead (deprecated since 1.14) -* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated since 1.19) -* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since 1.19) -* The Python built-in ``round`` errors for complex scalars. Use ``np.round`` or ``scalar.round`` instead (deprecated since 1.19) -* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) -* Parsing an integer via a float string is no longer supported. (deprecated since 1.23) To avoid this error you can - * make sure the original data is stored as integers. - * use the ``converters=float`` keyword argument. - * Use ``np.loadtxt(...).astype(np.int64)`` -* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` or fill the tuple with ``None`` (deprecated since 1.19) -* Special handling of matrix is in np.outer is removed. 
Convert to a ndarray via ``matrix.A`` (deprecated since 1.20) diff --git a/doc/release/upcoming_changes/28343.change.rst b/doc/release/upcoming_changes/28343.change.rst deleted file mode 100644 index 378ef775b62e..000000000000 --- a/doc/release/upcoming_changes/28343.change.rst +++ /dev/null @@ -1 +0,0 @@ -* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` now always returns zero for empty arrays. Empty arrays have at least one axis of size zero. This affects `np.linalg.norm`, `np.linalg.vector_norm`, and `np.linalg.matrix_norm`. Previously, NumPy would raises errors or return zero depending on the shape of the array. diff --git a/doc/release/upcoming_changes/28426.change.rst b/doc/release/upcoming_changes/28426.change.rst deleted file mode 100644 index d1c48640eed0..000000000000 --- a/doc/release/upcoming_changes/28426.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -Changes to automatic bin selection in numpy.histogram ------------------------------------------------------ -The automatic bin selection algorithm in ``numpy.histogram`` has been modified -to avoid out-of-memory errors for samples with low variation. -For full control over the selected bins the user can use set -the ``bin`` or ``range`` parameters of ``numpy.histogram``. diff --git a/doc/release/upcoming_changes/28436.change.rst b/doc/release/upcoming_changes/28436.change.rst deleted file mode 100644 index 60149e55a4d0..000000000000 --- a/doc/release/upcoming_changes/28436.change.rst +++ /dev/null @@ -1,10 +0,0 @@ -Build manylinux_2_28 wheels ---------------------------- - -Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the ``manylinux2014`` tag), which means -dropping support for redhat7/centos7, amazonlinux2, debian9, ubuntu18.04, and -other pre-glibc2.28 operating system versions, as per the `PEP 600 support -table`_. - -.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check - diff --git a/doc/release/upcoming_changes/28442.improvement.rst b/doc/release/upcoming_changes/28442.improvement.rst deleted file mode 100644 index 16d71bde19c5..000000000000 --- a/doc/release/upcoming_changes/28442.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -* ``np.dot`` now reports floating point exceptions. diff --git a/doc/release/upcoming_changes/28569.change.rst b/doc/release/upcoming_changes/28569.change.rst deleted file mode 100644 index f9d26fda0484..000000000000 --- a/doc/release/upcoming_changes/28569.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* A spelling error in the error message returned when converting a string to a float with the - method ``np.format_float_positional`` has been fixed. diff --git a/doc/release/upcoming_changes/28576.new_feature.rst b/doc/release/upcoming_changes/28576.new_feature.rst deleted file mode 100644 index 2c50887a49f2..000000000000 --- a/doc/release/upcoming_changes/28576.new_feature.rst +++ /dev/null @@ -1,15 +0,0 @@ -Allow ``out=...`` in ufuncs to ensure array result --------------------------------------------------- -NumPy has the sometimes difficult behavior that it currently usually -returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). -This is especially problematic for non-numerical dtypes (e.g. ``object``). - -For ufuncs (i.e. most simple math functions) it is now possible -to use ``out=...`` (literally `...`, e.g. ``out=Ellipsis``) which is identical in behavior to ``out`` not -being passed, but will ensure a non-scalar return. 
-This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` -also ensures a non-scalar return. - -Other functions with an ``out=`` kwarg should gain support eventually. -Downstream libraries that interoperate via ``__array_ufunc__`` or -``__array_function__`` may need to adapt to support this. diff --git a/doc/release/upcoming_changes/28615.change.rst b/doc/release/upcoming_changes/28615.change.rst deleted file mode 100644 index 58b751e40704..000000000000 --- a/doc/release/upcoming_changes/28615.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. -* `numpy.count_nonzero` for ``axis=None`` (default) now returns a NumPy scalar - instead of a Python integer. -* The parameter ``axis`` in `numpy.take_along_axis` function has now a default - value of ``-1``. diff --git a/doc/release/upcoming_changes/28619.highlight.rst b/doc/release/upcoming_changes/28619.highlight.rst deleted file mode 100644 index 6c296b92899e..000000000000 --- a/doc/release/upcoming_changes/28619.highlight.rst +++ /dev/null @@ -1,6 +0,0 @@ -Building NumPy with OpenMP Parallelization -------------------------------------------- -NumPy now supports OpenMP parallel processing capabilities when built with the -``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. -When enabled, ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for -parallel thread execution, improving performance for these operations. diff --git a/doc/release/upcoming_changes/28619.performance.rst b/doc/release/upcoming_changes/28619.performance.rst deleted file mode 100644 index 904decbe0ba6..000000000000 --- a/doc/release/upcoming_changes/28619.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Performance improvements to ``np.sort`` and ``np.argsort`` ----------------------------------------------------------- -``np.sort`` and ``np.argsort`` functions now can leverage OpenMP for parallel -thread execution, resulting in up to 3.5x speedups on x86 architectures with -AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built -with the -Denable_openmp Meson flag. Users can control the number of threads -used by setting the OMP_NUM_THREADS environment variable. diff --git a/doc/release/upcoming_changes/28669.new_feature.rst b/doc/release/upcoming_changes/28669.new_feature.rst deleted file mode 100644 index 2953a5123ccc..000000000000 --- a/doc/release/upcoming_changes/28669.new_feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. - This way, static type-checkers will infer ``dtype: np.dtype`` as - ``dtype: np.dtype[Any]``, without reporting an error. diff --git a/doc/release/upcoming_changes/28703.change.rst b/doc/release/upcoming_changes/28703.change.rst deleted file mode 100644 index 87bb431951f9..000000000000 --- a/doc/release/upcoming_changes/28703.change.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Printing of ``np.float16`` and ``np.float32`` scalars and arrays have been improved by - adjusting the transition to scientific notation based on the floating point precision. - A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. diff --git a/doc/release/upcoming_changes/28713.change.rst b/doc/release/upcoming_changes/28713.change.rst deleted file mode 100644 index 5e5c5adde88b..000000000000 --- a/doc/release/upcoming_changes/28713.change.rst +++ /dev/null @@ -1 +0,0 @@ -Remove use of -Wl,-ld_classic on macOS. 
This hack is no longer needed by Spack, and results in libraries that cannot link to other libraries built with ld (new). diff --git a/doc/release/upcoming_changes/28741.change.rst b/doc/release/upcoming_changes/28741.change.rst deleted file mode 100644 index ca9531f490d8..000000000000 --- a/doc/release/upcoming_changes/28741.change.rst +++ /dev/null @@ -1 +0,0 @@ -Re-enable overriding functions in the :mod:`numpy.strings` module. diff --git a/doc/release/upcoming_changes/28769.performance.rst b/doc/release/upcoming_changes/28769.performance.rst deleted file mode 100644 index 7fb8f02282f6..000000000000 --- a/doc/release/upcoming_changes/28769.performance.rst +++ /dev/null @@ -1,8 +0,0 @@ -Performance improvements for ``np.float16`` casts --------------------------------------------------- -Earlier, floating point casts to and from ``np.float16`` types -were emulated in software on all platforms. - -Now, on ARM devices that support Neon float16 intrinsics (such as -recent Apple Silicon), the native float16 path is used to achieve -the best performance. diff --git a/doc/release/upcoming_changes/28856.improvement.rst b/doc/release/upcoming_changes/28856.improvement.rst deleted file mode 100644 index 83911035f097..000000000000 --- a/doc/release/upcoming_changes/28856.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -* ``np.dtypes.StringDType`` is now a - `generic type `_ which - accepts a type argument for ``na_object`` that defaults to ``typing.Never``. - For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, - and ``StringDType()`` returns a ``StringDType[typing.Never]``. diff --git a/doc/release/upcoming_changes/28884.deprecation.rst b/doc/release/upcoming_changes/28884.deprecation.rst deleted file mode 100644 index c1be55fb0dd3..000000000000 --- a/doc/release/upcoming_changes/28884.deprecation.rst +++ /dev/null @@ -1,28 +0,0 @@ -``numpy.typing.NBitBase`` deprecation -------------------------------------- -The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in a future version. - -This type was previously intended to be used as a generic upper bound for type-parameters, for example: - -.. code-block:: python - - import numpy as np - import numpy.typing as npt - - def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... - -But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete subtypes, causing static type-checkers to reject ``x: np.float64 = f(np.complex128(42j))``. - -So instead, the better approach is to use ``typing.overload``: - -.. code-block:: python - - import numpy as np - from typing import overload - - @overload - def f(x: np.complex64) -> np.float32: ... - @overload - def f(x: np.complex128) -> np.float64: ... - @overload - def f(x: np.clongdouble) -> np.longdouble: ... diff --git a/doc/release/upcoming_changes/28940.new_feature.rst b/doc/release/upcoming_changes/28940.new_feature.rst deleted file mode 100644 index e0d3dc8888c3..000000000000 --- a/doc/release/upcoming_changes/28940.new_feature.rst +++ /dev/null @@ -1,6 +0,0 @@ -* Static type-checkers now interpret: - - - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. - - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. - - This is because their type parameters now have default values. 
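As a short, hedged illustration of what the note above means in practice (bare annotations only; the variable names are arbitrary):

.. code-block:: python

    import numpy as np

    # With defaulted type parameters, a static checker reads these two
    # annotations as npt.NDArray[typing.Any] and np.flatiter[np.ndarray],
    # respectively, instead of flagging missing type arguments.
    arr: np.ndarray
    it: np.flatiter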
diff --git a/doc/release/upcoming_changes/28961.expired.rst b/doc/release/upcoming_changes/28961.expired.rst deleted file mode 100644 index 92031de35e62..000000000000 --- a/doc/release/upcoming_changes/28961.expired.rst +++ /dev/null @@ -1 +0,0 @@ -* Removed the ``np.compat`` package source code (removed in 2.0) diff --git a/doc/release/upcoming_changes/30340.expired.rst b/doc/release/upcoming_changes/30340.expired.rst new file mode 100644 index 000000000000..79dd57dde737 --- /dev/null +++ b/doc/release/upcoming_changes/30340.expired.rst @@ -0,0 +1 @@ +* ``numpy.distutils`` has been removed diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 9df2f6c546c5..1555dafb5539 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -20,19 +20,7 @@ body { width: 15%; } -/* Version switcher colors from PyData Sphinx Theme */ - -.version-switcher__button[data-active-version-name*="devdocs"] { - background-color: var(--pst-color-warning); - border-color: var(--pst-color-warning); - opacity: 0.9; -} - -.version-switcher__button:not([data-active-version-name*="stable"]):not([data-active-version-name*="dev"]):not([data-active-version-name*="pull"]) { - background-color: var(--pst-color-danger); - border-color: var(--pst-color-danger); - opacity: 0.9; -} +/* Version switcher from PyData Sphinx Theme */ .version-switcher__menu a.list-group-item { font-size: small; diff --git a/doc/source/building/distutils_equivalents.rst b/doc/source/building/distutils_equivalents.rst index 156174d02358..65821bfec9d9 100644 --- a/doc/source/building/distutils_equivalents.rst +++ b/doc/source/building/distutils_equivalents.rst @@ -3,7 +3,7 @@ Meson and ``distutils`` ways of doing things -------------------------------------------- -*Old workflows (numpy.distutils based):* +*Old workflows (numpy.distutils based, no longer relevant):* 1. ``python runtests.py`` 2. ``python setup.py build_ext -i`` + ``export diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index d7baeaee9324..d027ecb0ee8f 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -473,7 +473,7 @@ interface is self-documenting, so please see ``spin --help`` and install"). Editable installs are supported. It is important to understand that **you - may use either an editable install or ``spin`` in a given repository clone, + may use either an editable install or** ``spin`` **in a given repository clone, but not both**. If you use editable installs, you have to use ``pytest`` and other development tools directly instead of using ``spin``. diff --git a/doc/source/building/understanding_meson.rst b/doc/source/building/understanding_meson.rst index b990ff283271..0c29302c9abb 100644 --- a/doc/source/building/understanding_meson.rst +++ b/doc/source/building/understanding_meson.rst @@ -87,11 +87,11 @@ that's just an arbitrary name we picked here):: meson install -C build -It will then install to ``build-install/lib/python3.11/site-packages/numpy``, +It will then install to ``build-install/lib/python3.12/site-packages/numpy``, which is not on your Python path, so to add it do (*again, this is for learning purposes, using ``PYTHONPATH`` explicitly is typically not the best idea*):: - export PYTHONPATH=$PWD/build-install/lib/python3.11/site-packages/ + export PYTHONPATH=$PWD/build-install/lib/python3.12/site-packages/ Now we should be able to import ``numpy`` and run the tests. 
Remembering that we need to move out of the root of the repo to ensure we pick up the package diff --git a/doc/source/conf.py b/doc/source/conf.py index e3146bf768c9..f6e7fc57bde7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -114,7 +114,7 @@ class PyTypeObject(ctypes.Structure): templates_path = ['_templates'] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = {'.rst': 'restructuredtext'} # General substitutions. project = 'NumPy' @@ -145,14 +145,6 @@ class PyTypeObject(ctypes.Structure): # The reST default role (used for this markup: `text`) to use for all documents. default_role = "autolink" -# List of directories, relative to source directories, that shouldn't be searched -# for source files. -exclude_dirs = [] - -exclude_patterns = [] -if sys.version_info[:2] >= (3, 12): - exclude_patterns += ["reference/distutils.rst"] - # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False @@ -272,6 +264,10 @@ def setup(app): "json_url": "https://numpy.org/doc/_static/versions.json", }, "show_version_warning_banner": True, + "analytics": { + "plausible_analytics_domain": "numpy.org/doc/stable/", + "plausible_analytics_url": ("https://views.scientific-python.org/js/script.js"), + }, } html_title = f"{project} v{version} Manual" diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index 70476a3cc1b3..98dc552a779e 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -26,12 +26,13 @@ well known scientific Python projects, does **not** use semantic versioning. Instead, backwards incompatible API changes require deprecation warnings for at least two releases. For more details, see :ref:`NEP23`. -NumPy has both a Python API and a C API. The C API can be used directly or via -Cython, f2py, or other such tools. If your package uses the C API, then ABI -(application binary interface) stability of NumPy is important. NumPy's ABI is -forward but not backward compatible. This means: binaries compiled against a -given target version of NumPy's C API will still run correctly with newer NumPy -versions, but not with older versions. +NumPy provides both a Python API and a C-API. The C-API can be accessed +directly or through tools like Cython or f2py. If your package uses the +C-API, it's important to understand NumPy's application binary interface +(ABI) compatibility: NumPy's ABI is forward compatible but not backward +compatible. This means that binaries compiled against an older version of +NumPy will still work with newer versions, but binaries compiled against a +newer version will not necessarily work with older ones. Modules can also be safely built against NumPy 2.0 or later in :ref:`CPython's abi3 mode `, which allows @@ -87,16 +88,16 @@ Build-time dependency `__. -If a package either uses the NumPy C API directly or it uses some other tool +If a package either uses the NumPy C-API directly or it uses some other tool that depends on it like Cython or Pythran, NumPy is a *build-time* dependency of the package. -By default, NumPy will expose an API that is backwards compatible with the -oldest NumPy version that supports the currently oldest compatible Python -version. NumPy 1.25.0 supports Python 3.9 and higher and NumPy 1.19 is the -first version to support Python 3.9. Thus, we guarantee that, when using -defaults, NumPy 1.25 will expose a C-API compatible with NumPy 1.19. -(the exact version is set within NumPy-internal header files). 
+By default, NumPy exposes an API that is backward compatible with the earliest +NumPy version that supports the oldest Python version currently supported by +NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the +earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee +NumPy 1.25 will, when using defaults, expose a C-API compatible with NumPy +1.19. (The exact version is set within NumPy-internal header files.) NumPy is also forward compatible for all minor releases, but a major release will require recompilation (see NumPy 2.0-specific advice further down). @@ -127,14 +128,9 @@ compatible with a new major release of NumPy and may not be compatible with very old versions. For conda-forge packages, please see -`here `__. - -as of now, it is usually as easy as including:: - - host: - - numpy - run: - - {{ pin_compatible('numpy') }} +`here `__ +for instructions on how to declare a dependency on ``numpy`` when using the C +API. Runtime dependency & version ranges ----------------------------------- @@ -145,9 +141,7 @@ for dropping support for old Python and NumPy versions: :ref:`NEP29`. We recommend all packages depending on NumPy to follow the recommendations in NEP 29. -For *run-time dependencies*, specify version bounds using -``install_requires`` in ``setup.py`` (assuming you use ``numpy.distutils`` or -``setuptools`` to build). +For *run-time dependencies*, specify version bounds in ``pyproject.toml``. Most libraries that rely on NumPy will not need to set an upper version bound: NumPy is careful to preserve backward-compatibility. @@ -157,7 +151,7 @@ frequently, (b) use a large part of NumPy's API surface, and (c) is worried that changes in NumPy may break your code, you can set an upper bound of ``=2.0`` (or go. We'll focus on the "keep compatibility with 1.xx and 2.x" now, which is a little more involved. -*Example for a package using the NumPy C API (via C/Cython/etc.) which wants to support +*Example for a package using the NumPy C-API (via C/Cython/etc.) which wants to support NumPy 1.23.5 and up*: .. code:: ini diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 6df7a3ecb64a..eb81b335f56a 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -5,11 +5,15 @@ Advanced debugging tools ======================== If you reached here, you want to dive into, or use, more advanced tooling. -This is usually not necessary for first time contributors and most +This is usually not necessary for first-time contributors and most day-to-day development. These are used more rarely, for example close to a new NumPy release, or when a large or particularly complex change was made. +Some of these tools are used in NumPy's continuous integration tests. If you +see a test failure that only happens under a debugging tool, these instructions +should hopefully enable you to reproduce the test failure locally. + Since not all of these tools are used on a regular basis and only available on some systems, please expect differences, issues, or quirks; we will be happy to help if you get stuck and appreciate any improvements @@ -20,7 +24,7 @@ Finding C errors with additional tooling ######################################## Most development will not require more than a typical debugging toolchain -as shown in :ref:`Debugging `. +as shown in :ref:`Debugging `. But for example memory leaks can be particularly subtle or difficult to narrow down.
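Before reaching for the heavier tools below, a rough, hedged first pass at narrowing a leak down can be done from Python (``suspect`` is a hypothetical stand-in for the operation being investigated; ``tracemalloc`` only sees allocations routed through Python's tracing hooks, which NumPy's data buffers generally are):

.. code-block:: python

    import tracemalloc

    import numpy as np

    def suspect():
        # Hypothetical stand-in for the operation being investigated.
        np.ones(16).sum()

    tracemalloc.start()
    suspect()  # warm-up call so one-time caches are not reported
    before = tracemalloc.take_snapshot()
    for _ in range(10_000):
        suspect()
    after = tracemalloc.take_snapshot()

    # Allocation totals that scale with the iteration count hint at a leak.
    for stat in after.compare_to(before, "lineno")[:5]:
        print(stat)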
@@ -32,7 +36,8 @@ However, you can ensure that we can track down such issues more easily: consider creating an additional simpler test as well. This can be helpful, because often it is only easy to find which test triggers an issue and not which line of the test. -* Never use ``np.empty`` if data is read/used. ``valgrind`` will notice this +* Never use ``np.empty`` if data is read/used. + `Valgrind `_ will notice this and report an error. When you do not care about values, you can generate random values instead. @@ -127,7 +132,8 @@ to mark them, but expect some false positives. ``valgrind`` ============ -Valgrind is a powerful tool to find certain memory access problems and should +`Valgrind `_ is a powerful tool +to find certain memory access problems and should be run on complicated C code. Basic use of ``valgrind`` usually requires no more than:: @@ -166,7 +172,7 @@ Valgrind helps: Python allocators.) Even though using valgrind for memory leak detection is slow and less sensitive -it can be a convenient: you can run most programs with valgrind without +it can be convenient: you can run most programs with valgrind without modification. Things to be aware of: @@ -213,3 +219,267 @@ command for NumPy). .. _pytest-valgrind: https://github.com/seberg/pytest-valgrind + +C debuggers +=========== + +Whenever NumPy crashes or when working on changes to NumPy's low-level C or C++ +code, it's often convenient to run Python under a C debugger to get more +information. A debugger can aid in understanding an interpreter crash (e.g. due +to a segmentation fault) by providing a C call stack at the site of the +crash. The call stack often provides valuable context to understand the nature +of a crash. C debuggers are also very useful during development, allowing +interactive debugging in the C implementation of NumPy. + +The NumPy developers often use both ``gdb`` and ``lldb`` to debug NumPy. As a +rule of thumb, ``gdb`` is often easier to use on Linux while ``lldb`` is easier +to use on a Mac environment. They have disjoint user interfaces, so you will need to +learn how to use whichever one you land on. The ``gdb`` to ``lldb`` `command map +`_ is a convenient reference for how to +accomplish common recipes in both debuggers. + + +Building With Debug Symbols +--------------------------- + +The ``spin`` `development workflow tool +`_ has built-in support for working +with both ``gdb`` and ``lldb`` via the ``spin gdb`` and ``spin lldb`` commands. + +.. note:: + + Building with ``-Dbuildtype=debug`` has a couple of important effects to + be aware of: + + * **Assertions are enabled**: This build type does not define the ``NDEBUG`` + macro, which means that any C-level assertions in the code will be + active. This is very useful for debugging, as it can help pinpoint + where an unexpected condition occurs. + + * **Compiler flags may need overriding**: Some compiler toolchains, + particularly those from ``conda-forge``, may set optimization flags + like ``-O2`` by default. These can override the ``debug`` build type. + To ensure a true debug build in such environments, you may need to + manually unset or override this flag. + + For more details on both points, see the `meson-python guide on + debug builds `_. + +For both debuggers, it's advisable to build NumPy in either the ``debug`` or +``debugoptimized`` meson build profile. To use ``debug`` you can pass the option +via ``spin build``: + ..
code-block:: bash + + spin build -- -Dbuildtype=debug + +to use ``debugoptimized``, pass ``-Dbuildtype=debugoptimized`` instead. + +You can pass additional arguments to `meson setup +`_ besides ``buildtype`` using the +same positional argument syntax for ``spin build``. + +Running a Test Script +--------------------- + +Let's say you have a test script named ``test.py`` that lives in a ``test`` folder +in the same directory as the NumPy source checkout. You could execute the test +script using the ``spin`` build of NumPy with the following incantation: + +.. code-block:: bash + + spin gdb ../test/test.py + +This will launch into gdb. If all you care about is a call stack for a crash, +type "r" and hit enter. Your test script will run and if a crash happens, you +type "bt" to get a traceback. For ``lldb``, the instructions are similar, just +replace ``spin gdb`` with ``spin lldb``. + +You can also set breakpoints and use other more advanced techniques. See the +documentation for your debugger for more details. + +One common issue with breakpoints in NumPy is that some code paths get hit +repeatedly during the import of the ``numpy`` module. This can make it tricky or +tedious to find the first "real" call after the NumPy import has completed and +the ``numpy`` module is fully initialized. + +One workaround is to use a script like this: + +.. code-block:: python + + import os + import signal + + import numpy as np + + PID = os.getpid() + + def do_nothing(*args): + pass + + signal.signal(signal.SIGUSR1, do_nothing) + + os.kill(PID, signal.SIGUSR1) + + # the code to run under a debugger follows + + +This example installs a signal handler for the ``SIGUSR1`` signal that does +nothing and then calls ``os.kill`` on the Python process with the ``SIGUSR1`` +signal. This causes the signal handler to fire and critically also causes both +``gdb`` and ``lldb`` to halt execution inside of the ``kill`` syscall. + +If you run ``lldb`` you should see output something like this: + +.. code-block:: + + Process 67365 stopped + * thread #1, queue = 'com.apple.main-thread', stop reason = signal SIGUSR1 + frame #0: 0x000000019c4b9da4 libsystem_kernel.dylib`__kill + 8 + libsystem_kernel.dylib`__kill: + -> 0x19c4b9da4 <+8>: b.lo 0x19c4b9dc4 ; <+40> + 0x19c4b9da8 <+12>: pacibsp + 0x19c4b9dac <+16>: stp x29, x30, [sp, #-0x10]! + 0x19c4b9db0 <+20>: mov x29, sp + Target 0: (python3.13) stopped.
+ (lldb) bt + * thread #1, queue = 'com.apple.main-thread', stop reason = signal SIGUSR1 + * frame #0: 0x000000019c4b9da4 libsystem_kernel.dylib`__kill + 8 + frame #1: 0x000000010087f5c4 libpython3.13.dylib`os_kill + 104 + frame #2: 0x000000010071374c libpython3.13.dylib`cfunction_vectorcall_FASTCALL + 276 + frame #3: 0x00000001006c1e3c libpython3.13.dylib`PyObject_Vectorcall + 88 + frame #4: 0x00000001007edd1c libpython3.13.dylib`_PyEval_EvalFrameDefault + 23608 + frame #5: 0x00000001007e7e6c libpython3.13.dylib`PyEval_EvalCode + 252 + frame #6: 0x0000000100852944 libpython3.13.dylib`run_eval_code_obj + 180 + frame #7: 0x0000000100852610 libpython3.13.dylib`run_mod + 220 + frame #8: 0x000000010084fa4c libpython3.13.dylib`_PyRun_SimpleFileObject + 868 + frame #9: 0x000000010084f400 libpython3.13.dylib`_PyRun_AnyFileObject + 160 + frame #10: 0x0000000100874ab8 libpython3.13.dylib`pymain_run_file + 336 + frame #11: 0x0000000100874324 libpython3.13.dylib`Py_RunMain + 1516 + frame #12: 0x000000010087459c libpython3.13.dylib`pymain_main + 324 + frame #13: 0x000000010087463c libpython3.13.dylib`Py_BytesMain + 40 + frame #14: 0x000000019c152b98 dyld`start + 6076 + (lldb) + +As you can see, the C stack trace is inside of the ``kill`` syscall and an +``lldb`` prompt is active, allowing you to set breakpoints interactively. Since the +``os.kill`` call happens after the ``numpy`` module is already fully +initialized, this means any breakpoints set inside of ``kill`` will happen +*after* ``numpy`` is finished initializing. + +Use together with ``pytest`` +---------------------------- + +You can also run ``pytest`` tests under a debugger. This requires using +the debugger in a slightly more manual fashion, since ``spin`` does not yet +automate this process. First, run ``spin build`` to ensure there is a fully +built copy of NumPy managed by ``spin``. Then, to run the tests under ``lldb`` +you would do something like this: + +.. code-block:: bash + + spin lldb $(which python) $(which pytest) build-install/usr/lib/python3.13/site-packages/numpy/_core/tests/test_multiarray.py + +This will execute the tests in ``test_multiarray.py`` under lldb after typing 'r' and hitting enter. Note that this command comes from a session using Python 3.13 on a Mac. If you are using a different Python version or operating system, the directory layout inside ``build-install`` may be slightly different. + +You can set breakpoints as described above. The issue about breakpoints +commonly being hit during NumPy import also applies -- consider refactoring your +test workflow into a test script so you can adopt the workaround using +``os.kill`` described above. + +Note the use of ``$(which python)`` to ensure the debugger receives a path to a +Python executable. If you are using ``pyenv``, you may need to replace ``which +python`` with ``pyenv which python``, since ``pyenv`` relies on shim scripts +that ``which`` doesn't know about. + + +Compiler Sanitizers +=================== + +The `compiler sanitizer `_ suites +shipped by both GCC and LLVM offer a means to detect many common programming +errors at runtime. The sanitizers work by instrumenting the application code at +build time so additional runtime checks fire. Typically, sanitizers are run +during the course of regular testing and if a sanitizer check fails, this leads +to a test failure or crash, along with a report about the nature of the failure.
+
+While it is possible to use sanitizers with a "regular" build of CPython, it is
+best if you can set up a Python environment based on a from-source Python build
+with sanitizer instrumentation, and then use the instrumented Python to build
+NumPy and run the tests. If the entire Python stack is instrumented using the
+same sanitizer runtime, it becomes possible to identify issues that happen
+across the Python stack. This enables detecting memory leaks in NumPy due to
+misuse of memory allocated in CPython, for example.
+
+Build Python with Sanitizer Instrumentation
+-------------------------------------------
+
+See the `section in the Python developer's guide
+`_ on this topic for
+more information about building Python from source. To enable address sanitizer,
+you will need to pass ``--with-address-sanitizer`` to the ``configure`` script
+invocation when you build Python.
+
+You can also use `pyenv `_ to automate the
+process of building Python and quickly activate or deactivate a Python
+installation using a command-line interface similar to virtual
+environments. With ``pyenv`` you could install an ASAN-instrumented build of
+Python 3.13 like this:
+
+.. code-block:: bash
+
+   CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.13
+
+If you are interested in thread sanitizer, the ``cpython_sanity`` `docker images
+`_ might also be a quicker choice
+that bypasses building Python from source, although it may be annoying to do
+debugging work inside of a docker container.
+
+Use together with ``spin``
+--------------------------
+
+However you build Python, once you have an instrumented Python build, you can
+install NumPy's development and test dependencies and build NumPy with address
+sanitizer instrumentation. For example, to build NumPy with the ``debug``
+profile and address sanitizer, you would pass additional build options to
+``meson`` like this:
+
+.. code-block:: bash
+
+   spin build -- -Dbuildtype=debug -Db_sanitize=address
+
+
+Once the build is finished, you can use other ``spin`` commands like ``spin
+test`` and ``spin gdb`` as with any other Python build.
+
+Special considerations
+----------------------
+
+Some NumPy tests intentionally lead to ``malloc`` returning ``NULL``. In their
+default configuration, some of the compiler sanitizers flag this as an
+error. You can disable that check by passing ``allocator_may_return_null=1`` to
+the sanitizer as an option. For example, with address sanitizer:
+
+.. code-block:: bash
+
+   ASAN_OPTIONS=allocator_may_return_null=1 spin test
+
+You may see memory leaks coming from the Python interpreter, particularly on
+macOS. If the memory leak reports are not useful, you can disable leak detection
+by passing ``detect_leaks=0`` in ``ASAN_OPTIONS``. You can pass more than one
+option using a colon-delimited list, like this:
+
+.. code-block:: bash
+
+   ASAN_OPTIONS=allocator_may_return_null=1:halt_on_error=1:detect_leaks=1 spin test
+
+The ``halt_on_error`` option can be particularly useful -- it hard-crashes the
+Python executable whenever it detects an error, along with a report about the
+error that includes a stack trace.
+
+You can also take a look at the ``compiler_sanitizers.yml`` GitHub actions
+workflow configuration. It describes several different CI jobs that are run as
+part of the NumPy tests using the Thread, Address, and Undefined Behavior
+sanitizers.
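+
+Putting the pieces together, a complete address sanitizer session might look
+like the following sketch. It assumes ``pyenv`` is installed and simply chains
+the commands shown above:
+
+.. code-block:: bash
+
+   # build and activate an ASan-instrumented Python
+   CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.13
+   pyenv shell 3.13
+
+   # install NumPy's build and test requirements into this environment,
+   # then build NumPy against the instrumented interpreter
+   spin build -- -Dbuildtype=debug -Db_sanitize=address
+
+   # run the test suite with the sanitizer options discussed above
+   ASAN_OPTIONS=allocator_may_return_null=1:detect_leaks=0 spin test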
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index 7a6dc36b680d..c2085a0013ef 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -92,6 +92,7 @@ one of::
     $ spin test -v
     $ spin test numpy/random  # to run the tests in a specific module
     $ spin test -v -t numpy/_core/tests/test_nditer.py::test_iter_c_order
+    $ spin test -p auto  # to run tests in parallel threads using pytest-run-parallel
 
 This builds NumPy first, so the first time it may take a few minutes.
 
@@ -185,6 +186,16 @@ For more extensive information, see :ref:`testing-guidelines`.
 Note: do not run the tests from the root directory of your numpy git repo without ``spin``,
 that will result in strange test errors.
 
+Running type checks
+-------------------
+Type checks for changes that involve static type declarations are also run
+using ``spin``. The invocation looks like the following::
+
+    $ spin mypy
+
+This will look in the ``typing/tests`` directory for sets of operations to
+test for type incompatibilities.
+
 Running linting
 ---------------
 Lint checks can be performed on newly added lines of Python code.
diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst
index fa5f8b1e65b7..10b07cc1f437 100644
--- a/doc/source/dev/development_workflow.rst
+++ b/doc/source/dev/development_workflow.rst
@@ -205,13 +205,6 @@ these fragments in each commit message of a PR:
   settings. `See the configuration files for these checks.
   `__
 
-* ``[skip azp]``: skip Azure jobs
-
-  `Azure `__ is
-  where all comprehensive tests are run. This is an expensive run, and one you
-  could typically skip if you do documentation-only changes, for example.
-  `See the main configuration file for these checks.
-  `__
-
 * ``[skip circle]``: skip CircleCI jobs
 
   `CircleCI `__ is where we build the documentation and
diff --git a/doc/source/dev/internals.code-explanations.rst b/doc/source/dev/internals.code-explanations.rst
index b1ee9b114aa8..1bb8f60528c1 100644
--- a/doc/source/dev/internals.code-explanations.rst
+++ b/doc/source/dev/internals.code-explanations.rst
@@ -401,7 +401,7 @@ Iterators for the output arguments are
 then processed. Finally, the decision is made about how to execute the
 looping mechanism to ensure that all elements of the input arrays are
 combined to produce the output arrays of the correct type. The options for loop
-execution are one-loop (for :term`contiguous`, aligned, and correct data
+execution are one-loop (for :term:`contiguous`, aligned, and correct data
 type), strided-loop (for non-contiguous but still aligned and correct data
 type), and a buffered loop (for misaligned or incorrect data type
 situations). Depending on which execution method is called for,
diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst
index bf5da973e9fa..920848b9d0d3 100644
--- a/doc/source/f2py/buildtools/distutils-to-meson.rst
+++ b/doc/source/f2py/buildtools/distutils-to-meson.rst
@@ -5,12 +5,11 @@
 ------------------------
 
 As per the timeline laid out in :ref:`distutils-status-migration`,
-``distutils`` has ceased to be the default build backend for ``f2py``. This page
-collects common workflows in both formats.
+``distutils`` has been removed. This page collects common workflows.
 
 .. note::
 
-   This is a ****living**** document, `pull requests `_ are very welcome!
+   This is a **living** document, `pull requests `_ are very welcome!
 1.1 Baseline
 ~~~~~~~~~~~~
@@ -44,8 +43,6 @@ This will not win any awards, but can be a reasonable starting point.
 1.2.1 Basic Usage
 ^^^^^^^^^^^^^^^^^
 
-This is unchanged:
-
 .. code:: bash
 
     python -m numpy.f2py -c fib.f90 -m fib
@@ -57,46 +54,21 @@ This is unchanged:
 1.2.2 Specify the backend
 ^^^^^^^^^^^^^^^^^^^^^^^^^
 
-.. tab-set::
-
-   .. tab-item:: Distutils
-      :sync: distutils
-
-      .. code-block:: bash
-
-         python -m numpy.f2py -c fib.f90 -m fib --backend distutils
+.. code-block:: bash
 
-      This is the default for Python versions before 3.12.
+   python -m numpy.f2py -c fib.f90 -m fib
 
-   .. tab-item:: Meson
-      :sync: meson
-
-      .. code-block:: bash
-
-         python -m numpy.f2py -c fib.f90 -m fib --backend meson
-
-      This is the only option for Python versions after 3.12.
+This is the only option. There used to be a ``distutils`` backend but it was
+removed in NumPy 2.5.0.
 
 1.2.3 Pass a compiler name
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-.. tab-set::
-
-   .. tab-item:: Distutils
-      :sync: distutils
+.. code-block:: bash
 
-      .. code-block:: bash
+   FC=gfortran python -m numpy.f2py -c fib.f90 -m fib
 
-         python -m numpy.f2py -c fib.f90 -m fib --backend distutils --fcompiler=gfortran
-
-   .. tab-item:: Meson
-      :sync: meson
-
-      .. code-block:: bash
-
-         FC="gfortran" python -m numpy.f2py -c fib.f90 -m fib --backend meson
-
-      Native files can also be used.
+Native files can also be used.
 
 Similarly, ``CC`` can be used in both cases to set the ``C`` compiler. Since the
 environment variables are generally pretty common across both, so a small
@@ -117,14 +89,12 @@ sample is included below.
    +------------------------------------+-------------------------------+
    | LDFLAGS                            | Linker options                |
    +------------------------------------+-------------------------------+
-   | LD\ :sub:`LIBRARY`\ \ :sub:`PATH`\ | Library file locations (Unix) |
+   | LD_LIBRARY_PATH                    | Library file locations (Unix) |
    +------------------------------------+-------------------------------+
    | LIBS                               | Libraries to link against     |
    +------------------------------------+-------------------------------+
   | PATH                               | Search path for executables   |
    +------------------------------------+-------------------------------+
-   | LDFLAGS                            | Linker flags                  |
-   +------------------------------------+-------------------------------+
    | CXX                                | C++ compiler                  |
    +------------------------------------+-------------------------------+
    | CXXFLAGS                           | C++ compiler options          |
@@ -139,73 +109,31 @@ sample is included below.
 1.2.4 Dependencies
 ^^^^^^^^^^^^^^^^^^
 
-Here, ``meson`` can actually be used to set dependencies more robustly.
-
-.. tab-set::
-
-   .. tab-item:: Distutils
-      :sync: distutils
-
-      .. code-block:: bash
+.. code-block:: bash
 
-         python -m numpy.f2py -c fib.f90 -m fib --backend distutils -llapack
+   python -m numpy.f2py -c fib.f90 -m fib --dep lapack
 
-      Note that this approach in practice is error prone.
-
-   .. tab-item:: Meson
-      :sync: meson
-
-      .. code-block:: bash
-
-         python -m numpy.f2py -c fib.f90 -m fib --backend meson --dep lapack
-
-      This maps to ``dependency("lapack")`` and so can be used for a wide variety
-      of dependencies. They can be `customized further
-      `_
-      to use CMake or other systems to resolve dependencies.
+This maps to ``dependency("lapack")`` and so can be used for a wide variety
+of dependencies. They can be `customized further
+`_
+to use CMake or other systems to resolve dependencies.
 
 1.2.5 Libraries
 ^^^^^^^^^^^^^^^
 
-Both ``meson`` and ``distutils`` are capable of linking against libraries.
-
-.. tab-set::
-
-   .. tab-item:: Distutils
-      :sync: distutils
-
-      .. 
code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils -lmylib -L/path/to/mylib +``meson`` is capable of linking against libraries. - .. tab-item:: Meson - :sync: meson +.. code-block:: bash - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson -lmylib -L/path/to/mylib + python -m numpy.f2py -c fib.f90 -m fib -lmylib -L/path/to/mylib 1.3 Customizing builds ~~~~~~~~~~~~~~~~~~~~~~ -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils --build-dir blah - - This can be technically integrated with other codes, see :ref:`f2py-distutils`. - - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash +.. code-block:: bash - python -m numpy.f2py -c fib.f90 -m fib --backend meson --build-dir blah + python -m numpy.f2py -c fib.f90 -m fib --build-dir blah - The resulting build can be customized via the - `Meson Build How-To Guide `_. - In fact, the resulting set of files can even be committed directly and used - as a meson subproject in a separate codebase. +The resulting build can be customized via the +`Meson Build How-To Guide `_. +In fact, the resulting set of files can even be committed directly and used +as a meson subproject in a separate codebase. diff --git a/doc/source/f2py/buildtools/distutils.rst b/doc/source/f2py/buildtools/distutils.rst deleted file mode 100644 index 87e17a811cd0..000000000000 --- a/doc/source/f2py/buildtools/distutils.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. _f2py-distutils: - -============================= -Using via `numpy.distutils` -============================= - -.. legacy:: - - ``distutils`` has been removed in favor of ``meson`` see - :ref:`distutils-status-migration`. - - -.. currentmodule:: numpy.distutils.core - -:mod:`numpy.distutils` is part of NumPy, and extends the standard Python -``distutils`` module to deal with Fortran sources and F2PY signature files, e.g. -compile Fortran sources, call F2PY to construct extension modules, etc. - -.. topic:: Example - - Consider the following ``setup_file.py`` for the ``fib`` and ``scalar`` - examples from :ref:`f2py-getting-started` section: - - .. literalinclude:: ./../code/setup_example.py - :language: python - - Running - - .. code-block:: bash - - python setup_example.py build - - will build two extension modules ``scalar`` and ``fib2`` to the - build directory. - -Extensions to ``distutils`` -=========================== - -:mod:`numpy.distutils` extends ``distutils`` with the following features: - -* :class:`Extension` class argument ``sources`` may contain Fortran source - files. In addition, the list ``sources`` may contain at most one - F2PY signature file, and in this case, the name of an Extension module must - match with the ```` used in signature file. It is - assumed that an F2PY signature file contains exactly one ``python - module`` block. - - If ``sources`` do not contain a signature file, then F2PY is used to scan - Fortran source files to construct wrappers to the Fortran codes. - - Additional options to the F2PY executable can be given using the - :class:`Extension` class argument ``f2py_options``. - -* The following new ``distutils`` commands are defined: - - ``build_src`` - to construct Fortran wrapper extension modules, among many other things. - ``config_fc`` - to change Fortran compiler options. - - Additionally, the ``build_ext`` and ``build_clib`` commands are also enhanced - to support Fortran sources. - - Run - - .. 
code-block:: bash
-
-     python config_fc build_src build_ext --help
-
-  to see available options for these commands.
-
-* When building Python packages containing Fortran sources, one
-  can choose different Fortran compilers by using the ``build_ext``
-  command option ``--fcompiler=``. Here ```` can be one of the
-  following names (on ``linux`` systems)::
-
-    absoft compaq fujitsu g95 gnu gnu95 intel intele intelem lahey nag nagfor nv pathf95 pg vast
-
-  See ``numpy_distutils/fcompiler.py`` for an up-to-date list of
-  supported compilers for different platforms, or run
-
-  .. code-block:: bash
-
-    python -m numpy.f2py -c --backend distutils --help-fcompiler
diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst
index 37782e5ca74b..671fd5b6d2cf 100644
--- a/doc/source/f2py/buildtools/index.rst
+++ b/doc/source/f2py/buildtools/index.rst
@@ -11,7 +11,7 @@ with ``f2py``.
 
    The default build system for ``f2py`` has traditionally been through the
    enhanced ``numpy.distutils`` module. This module is based on ``distutils``
-   which was removed in ``Python 3.12.0`` in **October 2023**. Like the rest of
+   which was removed in ``NumPy 2.5.0`` in **June 2026**. Like the rest of
    NumPy and SciPy, ``f2py`` uses ``meson`` now, see
    :ref:`distutils-status-migration` for some more details.
 
@@ -107,7 +106,6 @@ Build systems
 .. toctree::
    :maxdepth: 2
 
-   distutils
    meson
    cmake
    skbuild
diff --git a/doc/source/f2py/buildtools/meson.rst b/doc/source/f2py/buildtools/meson.rst
index c17c5d2ddc87..44560bef8c5f 100644
--- a/doc/source/f2py/buildtools/meson.rst
+++ b/doc/source/f2py/buildtools/meson.rst
@@ -15,11 +15,6 @@ Using via ``meson``
    The default build system for ``f2py`` is now ``meson``, see
    :ref:`distutils-status-migration` for some more details..
 
-The key advantage gained by leveraging ``meson`` over the techniques described
-in :ref:`f2py-distutils` is that this feeds into existing systems and larger
-projects with ease. ``meson`` has a rather pythonic syntax which makes it more
-comfortable and amenable to extension for ``python`` users.
-
 Fibonacci walkthrough (F77)
 ===========================
diff --git a/doc/source/f2py/code/setup_example.py b/doc/source/f2py/code/setup_example.py
deleted file mode 100644
index ef79ad1ecfb6..000000000000
--- a/doc/source/f2py/code/setup_example.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from numpy.distutils.core import Extension
-
-ext1 = Extension(name='scalar',
-                 sources=['scalar.f'])
-ext2 = Extension(name='fib2',
-                 sources=['fib2.pyf', 'fib1.f'])
-
-if __name__ == "__main__":
-    from numpy.distutils.core import setup
-    setup(name='f2py_example',
-          description="F2PY Users Guide examples",
-          author="Pearu Peterson",
-          author_email="pearu@cens.ioc.ee",
-          ext_modules=[ext1, ext2]
-          )
-# End of setup_example.py
diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst
index e5746c49e94d..b6951b11da8d 100644
--- a/doc/source/f2py/f2py.getting-started.rst
+++ b/doc/source/f2py/f2py.getting-started.rst
@@ -22,15 +22,12 @@ following steps:
   * F2PY compiles all sources and builds an extension module containing
     the wrappers.
 
-  * In building the extension modules, F2PY uses ``meson`` and used to use
-    ``numpy.distutils`` For different build systems, see :ref:`f2py-bldsys`.
+  * In building the extension modules, F2PY uses ``meson``. For different
+    build systems, see :ref:`f2py-bldsys`.
 
 .. note::
 
-   See :ref:`f2py-meson-distutils` for migration information.
- - * Depending on your operating system, you may need to install the Python development headers (which provide the file ``Python.h``) separately. In Linux Debian-based distributions this package should be called ``python3-dev``, @@ -224,7 +221,7 @@ Fortran code, we can apply the wrapping steps one by one. .. literalinclude:: ./code/fib2.pyf :language: fortran -* Finally, we build the extension module with ``numpy.distutils`` by running: +* Finally, we build the extension module by running: :: diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst index b5cfb168073a..46f1de0212d6 100644 --- a/doc/source/f2py/index.rst +++ b/doc/source/f2py/index.rst @@ -45,6 +45,8 @@ end matches the NumPy version printed from ``python -m numpy.f2py``, then you can use the shorter version. If not, or if you cannot run ``f2py``, you should replace all calls to ``f2py`` mentioned in this guide with the longer version. +For Meson build examples, see :doc:`usage`. + .. toctree:: :maxdepth: 3 diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index 635455fdb58a..ec936bb72e1c 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -101,10 +101,6 @@ Here ```` may also contain signature files. Among other options and ``;`` on Windows. In ``CMake`` this corresponds to using ``$``. -``--help-link []`` - List system resources found by ``numpy_distutils/system_info.py``. For - example, try ``f2py --help-link lapack_opt``. - 3. Building a module ~~~~~~~~~~~~~~~~~~~~ @@ -127,7 +123,7 @@ module is constructed by scanning all Fortran source codes for routine signatures, before proceeding to build the extension module. .. warning:: - From Python 3.12 onwards, ``distutils`` has been removed. Use environment + ``distutils`` has been removed. Use environment variables or native files to interact with ``meson`` instead. See its `FAQ `__ for more information. @@ -135,17 +131,13 @@ Among other options (see below) and options described for previous modes, the fo .. note:: - .. versionchanged:: 1.26.0 - There are now two separate build backends which can be used, ``distutils`` - and ``meson``. Users are **strongly** recommended to switch to ``meson`` - since it is the default above Python ``3.12``. + .. versionchanged:: 2.5.0 + The ``distutils`` backend has been removed. Common build flags: ``--backend `` - Specify the build backend for the compilation process. The supported backends - are ``meson`` and ``distutils``. If not specified, defaults to ``distutils``. - On Python 3.12 or higher, the default is ``meson``. + Legacy option, only ``meson`` is supported. ``--f77flags=`` Specify F77 compiler flags ``--f90flags=`` @@ -165,39 +157,13 @@ Common build flags: Add directory ```` to the list of directories to be searched for ``-l``. -The ``meson`` specific flags are: - -``--dep `` **meson only** +``--dep `` Specify a meson dependency for the module. This may be passed multiple times for multiple dependencies. Dependencies are stored in a list for further processing. Example: ``--dep lapack --dep scalapack`` This will identify "lapack" and "scalapack" as dependencies and remove them from argv, leaving a dependencies list containing ["lapack", "scalapack"]. -The older ``distutils`` flags are: - -``--help-fcompiler`` **no meson** - List the available Fortran compilers. -``--fcompiler=`` **no meson** - Specify a Fortran compiler type by vendor. 
-``--f77exec=`` **no meson**
-  Specify the path to a F77 compiler
-``--f90exec=`` **no meson**
-  Specify the path to a F90 compiler
-``--opt=`` **no meson**
-  Specify optimization flags
-``--arch=`` **no meson**
-  Specify architecture specific optimization flags
-``--noopt`` **no meson**
-  Compile without optimization flags
-``--noarch`` **no meson**
-  Compile without arch-dependent optimization flags
-``link-`` **no meson**
-  Link the extension module with  as defined by
-  ``numpy_distutils/system_info.py``. E.g. to link with optimized LAPACK
-  libraries (vecLib on MacOSX, ATLAS elsewhere), use ``--link-lapack_opt``.
-  See also ``--help-link`` switch.
-
 .. note::
 
    The ``f2py -c`` option must be applied either to an existing ``.pyf`` file
@@ -295,35 +261,64 @@ When using ``numpy.f2py`` as a module, the following functions can be invoked.
 
 .. automodule:: numpy.f2py
    :members:
 
-Automatic extension module generation
-=====================================
+Building with Meson (Examples)
+==============================
+
+Using f2py with Meson
+~~~~~~~~~~~~~~~~~~~~~
+
+Meson is a modern build system recommended for building Python extension
+modules, especially starting with Python 3.12 and NumPy 2.x. Meson provides
+a robust and maintainable way to build Fortran extensions with f2py.
+
+To build a Fortran extension using f2py and Meson, you can use Meson's
+``custom_target`` to invoke f2py and generate the extension module. The
+following minimal example demonstrates how to do this: it builds the ``add``
+extension from the ``add.f`` and ``add.pyf`` files described in the
+:ref:`f2py-examples` (note that you do not always need a ``.pyf`` file: in
+many cases ``f2py`` can figure out the annotations by itself).
+
+Project layout:
+
+   f2py_examples/
+       meson.build
+       add.f
+       add.pyf (optional)
+       __init__.py (can be empty)
+
+Example ``meson.build``:
+
+.. code-block:: meson
+
-If you want to distribute your f2py extension module, then you only
-need to include the .pyf file and the Fortran code. The distutils
-extensions in NumPy allow you to define an extension module entirely
-in terms of this interface file. A valid ``setup.py`` file allowing
-distribution of the ``add.f`` module (as part of the package
-``f2py_examples`` so that it would be loaded as ``f2py_examples.add``) is:
+   project('f2py_examples', 'fortran')
 
-.. code-block:: python
+   py = import('python').find_installation(pure: false)
 
-    def configuration(parent_package='', top_path=None)
-        from numpy.distutils.misc_util import Configuration
-        config = Configuration('f2py_examples',parent_package, top_path)
-        config.add_extension('add', sources=['add.pyf','add.f'])
-        return config
+   # List your Fortran source files
+   sources = files('add.pyf', 'add.f')
 
-    if __name__ == '__main__':
-        from numpy.distutils.core import setup
-        setup(**configuration(top_path='').todict())
+   # Build the extension by invoking f2py via a custom target.
+   # EXT_SUFFIX is the interpreter-specific extension suffix,
+   # e.g. '.cpython-312-x86_64-linux-gnu.so'.
+   add_mod = custom_target(
+     'add_extension',
+     input: sources,
+     output: 'add' + py.get_variable('EXT_SUFFIX'),
+     command: [
+       py.full_path(), '-m', 'numpy.f2py',
+       '-c', '@INPUT@',
+       '-m', 'add'
+     ],
+     build_by_default: true,
+     # Install the built extension into the f2py_examples package
+     install: true,
+     install_dir: py.get_install_dir(subdir: 'f2py_examples')
+   )
 
-Installation of the new package is easy using::
-
-    pip install .
-
-assuming you have the proper permissions to write to the main site-
-packages directory for the version of Python you are using. For the
-resulting package to work, you need to create a file named ``__init__.py``
-(in the same directory as ``add.pyf``). Notice the extension module is
-defined entirely in terms of the ``add.pyf`` and ``add.f`` files. The
-conversion of the .pyf file to a .c file is handled by `numpy.distutils`.
+   # Install the pure Python part of the package; an empty ``__init__.py``
+   # next to the extension module is enough
+   py.install_sources('__init__.py', subdir: 'f2py_examples')
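+
+With this ``meson.build`` in place, the package is then configured, compiled,
+and installed with the standard Meson commands (a typical invocation; adjust
+directories and options to your setup):
+
+.. code-block:: bash
+
+   meson setup builddir
+   meson compile -C builddir
+   meson install -C builddir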
+
+For more details and advanced usage, see the Meson build guide in the user
+documentation or refer to SciPy's Meson build files for real-world examples:
+https://github.com/scipy/scipy/tree/main/meson.build
diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst
index ea0af7505ce7..aa7851da5dd2 100644
--- a/doc/source/f2py/windows/index.rst
+++ b/doc/source/f2py/windows/index.rst
@@ -71,12 +71,6 @@ Cygwin (FOSS)
    Cygwin is meant to compile UNIX software on Windows, instead of building
    native Windows programs. This means cross compilation is required.
 
-The compilation suites described so far are compatible with the `now
-deprecated`_ ``np.distutils`` build backend which is exposed by the F2PY CLI.
-Additional build system usage (``meson``, ``cmake``) as described in
-:ref:`f2py-bldsys` allows for a more flexible set of compiler
-backends including:
-
+Other compiler suites which can be used include:
+
 Intel oneAPI
    The newer Intel compilers (``ifx``, ``icx``) are based on LLVM and can be
    used for native compilation. Licensing requirements can be onerous.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 02f3a8dc12b0..00d1bb62e6b3 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -21,6 +21,7 @@ NumPy documentation
 `Historical versions of documentation `_
 
 **Useful links**:
+`Home `_ |
 `Installation `_ |
 `Source Repository `_ |
 `Issue Tracker `_ |
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index 8a2e804eb36b..755a13ff7252 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -32,7 +32,7 @@ Note that :func:`asarray` always returns the base-class ndarray. If
 you are confident that your use of the array object can handle any
 subclass of an ndarray, then :func:`asanyarray` can be used to allow
 subclasses to propagate more cleanly through your subroutine. In
-principal a subclass could redefine any aspect of the array and
+principle, a subclass could redefine any aspect of the array and
 therefore, under strict guidelines, :func:`asanyarray` would rarely
 be useful.
 However, most subclasses of the array object will not
 redefine certain aspects of the array object such as the buffer
@@ -479,16 +479,16 @@ Example:
 
     >>> import numpy as np
 
-    >>> a = np.memmap('newfile.dat', dtype=float, mode='w+', shape=1000)
+    >>> a = np.memmap('newfile.dat', dtype=np.float64, mode='w+', shape=1000)
     >>> a[10] = 10.0
     >>> a[30] = 30.0
     >>> del a
-    >>> b = np.fromfile('newfile.dat', dtype=float)
+    >>> b = np.fromfile('newfile.dat', dtype=np.float64)
     >>> print(b[10], b[30])
     10.0 30.0
-    >>> a = np.memmap('newfile.dat', dtype=float)
+    >>> a = np.memmap('newfile.dat', dtype=np.float64)
     >>> print(a[10], a[30])
     10.0 30.0
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index 8dbff88c918e..9cb7f59db78b 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -53,7 +53,14 @@ months ('M'), weeks ('W'), and days ('D'), while the time units are
 hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and
 some additional SI-prefix seconds-based units. The `datetime64` data type
 also accepts the string "NAT", in any combination of lowercase/uppercase
-letters, for a "Not A Time" value.
+letters, for a "Not A Time" value. The string "now" is also supported and
+returns the current UTC time. By default, it uses second ('s') precision, but
+you can specify a different unit (e.g., 'M', 'D', 'h') to truncate the result
+to that precision. Units finer than seconds (such as 'ms' or 'ns') are
+supported but will show fractional parts as zeros, effectively truncating to
+whole seconds. The string "today" is also supported and returns the current UTC
+date with day precision. It also supports the same precision specifiers
+as ``now``.
 
 .. admonition:: Example
 
@@ -91,6 +98,22 @@ letters, for a "Not A Time" value.
     >>> np.datetime64('nat')
     np.datetime64('NaT')
 
+    The current time (UTC, default second precision):
+
+    >>> np.datetime64('now')
+    np.datetime64('2025-08-05T02:22:14')  # result will depend on the current time
+
+    >>> np.datetime64('now', 'D')
+    np.datetime64('2025-08-05')
+
+    >>> np.datetime64('now', 'ms')
+    np.datetime64('2025-08-05T02:22:14.000')
+
+    The current date:
+
+    >>> np.datetime64('today')
+    np.datetime64('2025-08-05')  # result will depend on the current date
+
 When creating an array of datetimes from a string, it is still possible
 to automatically select the unit from the inputs, by using the
 datetime type with generic units.
@@ -101,10 +124,10 @@ datetime type with generic units.
 
     >>> import numpy as np
 
-    >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64')
+    >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype=np.datetime64)
     array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64[D]')
 
-    >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype='datetime64')
+    >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype=np.datetime64)
     array(['2001-01-01T12:00:00.000', '2002-02-03T13:56:03.172'],
           dtype='datetime64[ms]')
 
@@ -303,6 +326,66 @@ us / Ξs        microsecond      +/- 2.9e5 years   [290301 BC, 294241 AD]
 as             attosecond       +/- 9.2 seconds   [ 1969 AD, 1970 AD]
 ======== ================ ======================= ==========================
+
+Converting datetime and timedelta to Python objects
+===================================================
+
+NumPy follows a strict protocol when converting `datetime64` and/or
+`timedelta64` to Python objects (e.g., ``tuple``, ``list``, `datetime.datetime`).
+
+The protocol is described in the following table:
+
+======================== ===================== ======================
+ Input Type               for `datetime64`      for `timedelta64`
+======================== ===================== ======================
+ ``NaT``                  ``None``              ``None``
+ ns/ps/fs/as              ``int``               ``int``
+ Ξs/ms/s/m/h              `datetime.datetime`   `datetime.timedelta`
+ D/W (Linear units)       `datetime.date`       `datetime.timedelta`
+ Y/M (Non-linear units)   `datetime.date`       ``int``
+ Generic units            `datetime.date`       ``int``
+======================== ===================== ======================
+
+.. admonition:: Example
+
+   .. try_examples::
+
+      >>> import numpy as np
+
+      >>> type(np.datetime64('NaT').item())
+      <class 'NoneType'>
+
+      >>> type(np.timedelta64('NaT').item())
+      <class 'NoneType'>
+
+      >>> type(np.timedelta64(123, 'ns').item())
+      <class 'int'>
+
+      >>> type(np.datetime64('2025-01-01T12:00:00.123456').item())
+      <class 'datetime.datetime'>
+
+      >>> type(np.timedelta64(10, 'D').item())
+      <class 'datetime.timedelta'>
+
+
+In the case where conversion of `datetime64` and/or `timedelta64` is done
+against Python types like ``int``, ``float``, and ``str``, the corresponding
+return types will be ``np.int64``, ``np.float64``, and ``np.str_``.
+
+
+.. admonition:: Example
+
+   .. try_examples::
+
+      >>> import numpy as np
+
+      >>> type(np.timedelta64(1, 'D').astype(int))
+      <class 'numpy.int64'>
+
+      >>> type(np.datetime64('2025-01-01T12:00:00.123456').astype(float))
+      <class 'numpy.float64'>
+
+      >>> type(np.timedelta64(123, 'ns').astype(str))
+      <class 'numpy.str_'>
+
+
 Business day functionality
 ==========================
diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst
index 3c757a4490e7..262c22655c76 100644
--- a/doc/source/reference/arrays.dtypes.rst
+++ b/doc/source/reference/arrays.dtypes.rst
@@ -254,12 +254,12 @@ One-character strings
 Array-protocol type strings (see :ref:`arrays.interface`)
    The first character specifies the kind of data and the remaining
    characters specify the number of bytes per item, except for Unicode,
-   where it is interpreted as the number of characters. The item size
-   must correspond to an existing type, or an error will be raised. The
-   supported kinds are
+   where it is interpreted as the number of characters, and except for
+   ``b1``, which represents a boolean. The item size must correspond to an
+   existing type, or an error will be raised. The supported kinds are
 
-   ================ ========================
-   ``'?'``           boolean
+   ================= ========================
+   ``'?'``, ``'b1'`` boolean
    ``'b'``           (signed) byte
    ``'B'``           unsigned byte
    ``'i'``           (signed) integer
@@ -272,7 +272,7 @@ Array-protocol type strings (see :ref:`arrays.interface`)
    ``'S'``, ``'a'``  zero-terminated bytes (not recommended)
    ``'U'``           Unicode string
    ``'V'``           raw data (:class:`void`)
-   ================ ========================
+   ================= ========================
 
@@ -561,7 +561,7 @@ This equivalence can only be handled through ``==``, not through ``is``.
>>> import numpy as np - >>> a = np.array([1, 2], dtype=float) + >>> a = np.array([1, 2], dtype=np.float64) >>> a.dtype == np.dtype(np.float64) True >>> a.dtype == np.float64 diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst index ebe3f6b68918..75c17060c8fc 100644 --- a/doc/source/reference/arrays.interface.rst +++ b/doc/source/reference/arrays.interface.rst @@ -120,7 +120,7 @@ This approach to the interface consists of the object having an **Default**: ``[('', typestr)]`` - **data** (optional) + **data** A 2-tuple whose first argument is a :doc:`Python integer ` that points to the data-area storing the array contents. @@ -136,15 +136,23 @@ This approach to the interface consists of the object having an This attribute can also be an object exposing the :ref:`buffer interface ` which - will be used to share the data. If this key is not present (or - returns None), then memory sharing will be done - through the buffer interface of the object itself. In this + will be used to share the data. If this key is ``None``, then memory sharing + will be done through the buffer interface of the object itself. In this case, the offset key can be used to indicate the start of the buffer. A reference to the object exposing the array interface must be stored by the new object if the memory area is to be secured. - **Default**: ``None`` + .. note:: + Not specifying this field uses a "scalar" path that we may remove in the future + as we are not aware of any users. In this case, NumPy assigns the original object + as a scalar into the array. + + .. versionchanged:: 2.4 + Prior to NumPy 2.4 a ``NULL`` pointer used the undocumented "scalar" path + and was thus usually not accepted (and triggered crashes on some paths). + After NumPy 2.4, ``NULL`` is accepted, although NumPy will create a 1-byte sized + new allocation for the array. **strides** (optional) Either ``None`` to indicate a C-style contiguous array or diff --git a/doc/source/reference/arrays.promotion.rst b/doc/source/reference/arrays.promotion.rst index d2dead0ce7b5..32e503383217 100644 --- a/doc/source/reference/arrays.promotion.rst +++ b/doc/source/reference/arrays.promotion.rst @@ -79,10 +79,10 @@ their precision when determining the result dtype. This is often convenient. For instance, when working with arrays of a low precision dtype, it is usually desirable for simple operations with Python scalars to preserve the dtype. - >>> arr_float32 = np.array([1, 2.5, 2.1], dtype="float32") + >>> arr_float32 = np.array([1, 2.5, 2.1], dtype=np.float32) >>> arr_float32 + 10.0 # undesirable to promote to float64 array([11. , 12.5, 12.1], dtype=float32) - >>> arr_int16 = np.array([3, 5, 7], dtype="int16") + >>> arr_int16 = np.array([3, 5, 7], dtype=np.int16) >>> arr_int16 + 10 # undesirable to promote to int64 array([13, 15, 17], dtype=int16) @@ -130,7 +130,7 @@ overflows: ... RuntimeWarning: overflow encountered in scalar add Note that NumPy warns when overflows occur for scalars, but not for arrays; -e.g., ``np.array(100, dtype="uint8") + 100`` will *not* warn. +e.g., ``np.array(100, dtype=np.uint8) + 100`` will *not* warn. Numerical promotion ------------------- diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 02db78ebb2b1..d28e535f9428 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -784,7 +784,7 @@ cannot not be accessed directly. 
 Allows setting of the itemsize, this is *only* relevant for string/bytes
 datatypes as it is the current pattern to define one with a new size.
 
-.. c:function:: npy_intp PyDataType_ALIGNENT(PyArray_Descr *descr)
+.. c:function:: npy_intp PyDataType_ALIGNMENT(PyArray_Descr *descr)
 
     The alignment of the datatype.
 
@@ -1786,9 +1786,9 @@ the functions that must be implemented for each slot.
      - ``0.0`` is the default for ``sum([])``.  But ``-0.0`` is the correct
        identity otherwise as it preserves the sign for ``sum([-0.0])``.
      - We use no identity for object, but return the default of ``0`` and
-       ``1`` for the empty ``sum([], dtype=object)`` and
-       ``prod([], dtype=object)``.
-       This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work.
+       ``1`` for the empty ``sum([], dtype=np.object_)`` and
+       ``prod([], dtype=np.object_)``.
+       This allows ``np.sum(np.array(["a", "b"], dtype=np.object_))`` to work.
      - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least
        ``INT_MIN`` not a good *default* when there are no items.
 
@@ -1886,6 +1886,37 @@ with the rest of the ArrayMethod API.
    the main ufunc registration function. This adds a new implementation/loop
    to a ufunc. It replaces `PyUFunc_RegisterLoopForType`.
 
+.. c:type:: PyUFunc_LoopSlot
+
+   Structure used to add multiple loops to ufuncs from ArrayMethod specs.
+   This is used in `PyUFunc_AddLoopsFromSpecs`.
+
+   .. c:struct:: PyUFunc_LoopSlot
+
+      .. c:member:: const char *name
+
+         The name of the ufunc to add the loop to, in the form like that of
+         entry points, ``(module ':')? (object '.')* name``, with ``numpy``
+         the default module. Examples: ``sin``, ``strings.str_len``,
+         ``numpy.strings:str_len``.
+
+      .. c:member:: PyArrayMethod_Spec *spec
+
+         The ArrayMethod spec to use to create the loop.
+
+.. c:function:: int PyUFunc_AddLoopsFromSpecs( \
+        PyUFunc_LoopSlot *slots)
+
+   .. versionadded:: 2.4
+
+   Add multiple loops to ufuncs from ArrayMethod specs. This also
+   handles the registration of methods for the ufunc-like functions
+   ``sort`` and ``argsort``. See :ref:`array-methods-sorting` for details.
+
+   The ``slots`` argument must be a NULL-terminated array of
+   `PyUFunc_LoopSlot` (see above), each entry giving the name of a
+   ufunc and the spec needed to create the loop.
+
 .. c:function:: int PyUFunc_AddPromoter( \
         PyObject *ufunc, PyObject *DType_tuple, PyObject *promoter)
 
@@ -2036,6 +2067,36 @@ code:
 
         Py_INCREF(loop_descrs[2]);
     }
 
+.. _array-methods-sorting:
+
+Sorting and Argsorting
+~~~~~~~~~~~~~~~~~~~~~~
+
+Sorting and argsorting methods for dtypes can be registered using the
+ArrayMethod API. This is done by adding an ArrayMethod spec with the name
+``"sort"`` or ``"argsort"`` respectively. The spec must have ``nin=1``
+and ``nout=1`` for both sort and argsort. Sorting happens in-place, hence we
+enforce that ``data[0] == data[1]``. Argsorting returns a new array of
+indices, so the output must be of ``NPY_INTP`` type.
+
+The ``context`` passed to the loop contains the ``parameters`` field, which
+for these operations is a ``PyArrayMethod_SortParameters *`` struct. This
+struct contains a ``flags`` field which is a bitwise OR of ``NPY_SORTKIND``
+values indicating the kind of sort to perform (that is, whether it is a
+stable and/or descending sort). If the strided loop depends on the flags,
+a good way to deal with this is to define :c:macro:`NPY_METH_get_loop`,
+and not set any of the other loop slots.
+
+.. c:struct:: PyArrayMethod_SortParameters
+
+   .. c:member:: NPY_SORTKIND flags
+
+      The flags passed to the sort operation. This is a bitwise OR of
+      ``NPY_SORTKIND`` values indicating the kind of sort to perform.
+
+These specs can be registered using :c:func:`PyUFunc_AddLoopsFromSpecs`
+along with other ufunc loops.
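+
+For illustration, registering such loops might look like the sketch below,
+where ``MySortSpec`` and ``MyArgsortSpec`` are assumed to be previously
+defined ``PyArrayMethod_Spec`` instances for the dtype (hypothetical names):
+
+.. code-block:: c
+
+    /* The array must be NULL-terminated; the final {NULL, NULL}
+       entry marks the end of the list. */
+    PyUFunc_LoopSlot loops[] = {
+        {"sort", &MySortSpec},
+        {"argsort", &MyArgsortSpec},
+        {NULL, NULL}
+    };
+
+    if (PyUFunc_AddLoopsFromSpecs(loops) < 0) {
+        return -1;  /* registration failed, error already set */
+    }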
+
 API for calling array methods
 -----------------------------
 
@@ -2187,19 +2248,18 @@ Shape Manipulation
         PyArrayObject* self, PyArray_Dims* newshape, int refcheck, \
         NPY_ORDER fortran)
 
-   Equivalent to :meth:`ndarray.resize` (*self*, *newshape*, refcheck
-   ``=`` *refcheck*, order= fortran ). This function only works on
-   single-segment arrays. It changes the shape of *self* inplace and
-   will reallocate the memory for *self* if *newshape* has a
-   different total number of elements then the old shape. If
-   reallocation is necessary, then *self* must own its data, have
-   *self* - ``>base==NULL``, have *self* - ``>weakrefs==NULL``, and
-   (unless refcheck is 0) not be referenced by any other array.
-   The fortran argument can be :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`,
-   or :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually
-   it could be used to determine how the resize operation should view
-   the data when constructing a differently-dimensioned array.
-   Returns None on success and NULL on error.
+   Equivalent to :meth:`ndarray.resize` (*self*, *newshape*, *refcheck*).
+   This function only works on single-segment arrays. It changes the shape of
+   *self* in place and will reallocate the memory for *self* if *newshape* has
+   a different total number of elements than the old shape. If reallocation is
+   necessary, then *self* must own its data, have *self* - ``>base==NULL``,
+   have *self* - ``>weakrefs==NULL``, and (unless refcheck is 0) not be
+   referenced by any other array. The fortran argument can be
+   :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`, or
+   :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually it
+   could be used to determine how the resize operation should view the data
+   when constructing a differently-dimensioned array. Returns None on success
+   and NULL on error.
 
 .. c:function:: PyObject* PyArray_Transpose( \
         PyArrayObject* self, PyArray_Dims* permute)
 
@@ -2303,21 +2363,36 @@ Item selection and manipulation
 
 .. c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis, NPY_SORTKIND kind)
 
-   Equivalent to :meth:`ndarray.sort` (*self*, *axis*, *kind*).
-   Return an array with the items of *self* sorted along *axis*. The array
-   is sorted using the algorithm denoted by *kind*, which is an integer/enum pointing
-   to the type of sorting algorithms used.
-
-.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis)
-
-   Equivalent to :meth:`ndarray.argsort` (*self*, *axis*).
-   Return an array of indices such that selection of these indices
-   along the given ``axis`` would return a sorted version of *self*. If *self* ->descr
-   is a data-type with fields defined, then self->descr->names is used
-   to determine the sort order. A comparison where the first field is equal
-   will use the second field and so on. To alter the sort order of a
-   structured array, create a new data-type with a different order of names
-   and construct a view of the array with that new data-type.
+   Return an array with the items of ``self`` sorted along ``axis``. The array
+   is sorted using an algorithm whose properties are specified by the value of
+   ``kind``, an integer/enum specifying the requirements of the sorting
+   algorithm used. If ``self->descr`` is a data-type with fields defined,
+   then ``self->descr->names`` is used to determine the sort order.
+   A comparison where the first field is equal will use the second field and
+   so on. To alter the sort order of a structured array, create a new
+   data-type with a different order of names and construct a view of the
+   array with that new data-type.
+
+   This is the C level function called by the ndarray method
+   :meth:`ndarray.sort`, though with a different meaning
+   of ``kind`` -- see ``NPY_SORTKIND`` below.
+
+.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis, NPY_SORTKIND kind)
+
+   Return an array of indices such that selection of these indices along the
+   given ``axis`` would return a sorted version of ``self``. The array is
+   sorted using an algorithm whose properties are specified by ``kind``, an
+   integer/enum specifying the requirements of the sorting algorithm used. If
+   ``self->descr`` is a data-type with fields defined, then
+   ``self->descr->names`` is used to determine the sort order. A comparison
+   where the first field is equal will use the second field and so on. To
+   alter the sort order of a structured array, create a new data-type with a
+   different order of names and construct a view of the array with that new
+   data-type.
+
+   This is the C level function called by the ndarray method
+   :meth:`ndarray.argsort`, though with a different
+   meaning of ``kind`` -- see ``NPY_SORTKIND`` below.
 
 .. c:function:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis)
 
@@ -3521,6 +3596,121 @@ member of ``PyArrayDTypeMeta_Spec`` struct.
    force newly created arrays to have a newly created descriptor
    instance, no matter what input descriptor is provided by a user.
 
+.. c:macro:: NPY_DT_get_constant
+
+.. c:type:: int (PyArrayDTypeMeta_GetConstant)( \
+        PyArray_Descr *descr, int constant_id, void *out)
+
+   If defined, allows the DType to expose constant values such as machine
+   limits, special values (infinity, NaN), and floating-point characteristics.
+   The *descr* is the descriptor instance, *constant_id* is one of the
+   ``NPY_CONSTANT_*`` macros, and *out* is a pointer to uninitialized memory
+   where the constant value should be written. The memory pointed to by *out*
+   may be unaligned and is uninitialized.
+   Returns 1 on success, 0 if the constant is not available,
+   or -1 with an error set.
+
+   **Constant IDs**:
+
+   The following constant IDs are defined for retrieving dtype-specific values:
+
+   **Basic constants** (available for all numeric types):
+
+   .. c:macro:: NPY_CONSTANT_zero
+
+      The zero value for the dtype.
+
+   .. c:macro:: NPY_CONSTANT_one
+
+      The one value for the dtype.
+
+   .. c:macro:: NPY_CONSTANT_minimum_finite
+
+      The minimum finite value representable by the dtype. For floating-point
+      types, this is the most negative finite value (e.g., ``-FLT_MAX``).
+
+   .. c:macro:: NPY_CONSTANT_maximum_finite
+
+      The maximum finite value representable by the dtype.
+
+   **Floating-point special values**:
+
+   .. c:macro:: NPY_CONSTANT_inf
+
+      Positive infinity (only for floating-point types).
+
+   .. c:macro:: NPY_CONSTANT_ninf
+
+      Negative infinity (only for floating-point types).
+
+   .. c:macro:: NPY_CONSTANT_nan
+
+      Not-a-Number (only for floating-point types).
+
+   **Floating-point characteristics** (values of the dtype's native type):
+
+   .. c:macro:: NPY_CONSTANT_finfo_radix
+
+      The radix (base) of the floating-point representation. This is 2 for all
+      floating-point types.
+
+   .. c:macro:: NPY_CONSTANT_finfo_eps
+
+      Machine epsilon: the difference between 1.0 and the next representable
+      value greater than 1.0. Corresponds to C macros like ``FLT_EPSILON``,
+      ``DBL_EPSILON``.
+
+      .. note::
+         For long double in IBM double-double format (PowerPC), this is
+         defined as ``0x1p-105L`` (2^-105) based on the ~106 bits of mantissa
+         precision.
+
+   .. c:macro:: NPY_CONSTANT_finfo_epsneg
+
+      The difference between 1.0 and the next representable value less than
+      1.0. Typically ``eps / radix`` for binary floating-point types.
+
+   .. c:macro:: NPY_CONSTANT_finfo_smallest_normal
+
+      The smallest positive normalized floating-point number. Corresponds to C
+      macros like ``FLT_MIN``, ``DBL_MIN``. This is the smallest value with a
+      leading 1 bit in the mantissa.
+
+   .. c:macro:: NPY_CONSTANT_finfo_smallest_subnormal
+
+      The smallest positive subnormal (denormalized) floating-point number.
+      Corresponds to C macros like ``FLT_TRUE_MIN``, ``DBL_TRUE_MIN``. This is
+      the smallest representable positive value, with leading 0 bits in the
+      mantissa.
+
+   **Floating-point characteristics** (integer values, type ``npy_intp``):
+
+   These constants return integer metadata about the floating-point
+   representation. They are marked with the ``1 << 16`` bit to indicate they
+   return ``npy_intp`` values rather than the dtype's native type.
+
+   .. c:macro:: NPY_CONSTANT_finfo_nmant
+
+      Number of mantissa bits (excluding the implicit leading bit). For
+      example, IEEE 754 binary64 (double) has 52 explicit mantissa bits, so
+      this returns 52. Corresponds to ``MANT_DIG - 1`` from C standard macros.
+
+   .. c:macro:: NPY_CONSTANT_finfo_min_exp
+
+      Minimum exponent value. This is the minimum negative integer such that
+      the radix raised to the power of one less than that integer is a
+      normalized floating-point number. Corresponds to ``MIN_EXP - 1`` from C
+      standard macros (e.g., ``FLT_MIN_EXP - 1``).
+
+   .. c:macro:: NPY_CONSTANT_finfo_max_exp
+
+      Maximum exponent value. This is the maximum positive integer such that
+      the radix raised to the power of one less than that integer is a
+      representable finite floating-point number. Corresponds to ``MAX_EXP``
+      from C standard macros (e.g., ``FLT_MAX_EXP``).
+
+   .. c:macro:: NPY_CONSTANT_finfo_decimal_digits
+
+      The number of decimal digits of precision. Corresponds to ``DIG`` from C
+      standard macros (e.g., ``FLT_DIG``, ``DBL_DIG``).
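+
+   For illustration, an implementation of this slot for a hypothetical
+   double-based dtype might look like the sketch below (the function and
+   dtype are made up; only the slot signature and the ``NPY_CONSTANT_*``
+   IDs come from the API described above):
+
+   .. code-block:: c
+
+       #include <float.h>
+       #include <string.h>
+
+       static int
+       mydtype_get_constant(PyArray_Descr *descr, int constant_id, void *out)
+       {
+           double value;
+           (void)descr;  /* unused in this sketch */
+           switch (constant_id) {
+               case NPY_CONSTANT_zero:
+                   value = 0.0;
+                   break;
+               case NPY_CONSTANT_one:
+                   value = 1.0;
+                   break;
+               case NPY_CONSTANT_maximum_finite:
+                   value = DBL_MAX;
+                   break;
+               default:
+                   return 0;  /* constant not available */
+           }
+           /* *out* may be unaligned, so copy bytes rather than assign */
+           memcpy(out, &value, sizeof(value));
+           return 1;
+       }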
+
 PyArray_ArrFuncs slots
 ^^^^^^^^^^^^^^^^^^^^^^
 
@@ -3814,7 +4004,7 @@ In this case, the helper C files typically do not have a canonical place
 where ``PyArray_ImportNumPyAPI`` should be called (although it is OK and
 fast to call it often).
 
-To solve this, NumPy provides the following pattern that the the main
+To solve this, NumPy provides the following pattern that the main
 file is modified to define ``PY_ARRAY_UNIQUE_SYMBOL`` before the include:
 
 .. code-block:: c
@@ -4082,6 +4272,8 @@ Memory management
 
     Returns 0 if nothing was done, -1 on error, and 1 if action was taken.
 
+.. _array.ndarray.capi.threading:
+
 Threading support
 ~~~~~~~~~~~~~~~~~
 
@@ -4321,7 +4513,11 @@ Enumerated Types
 .. c:enum:: NPY_SORTKIND
 
     A special variable-type which can take on different values to indicate
-    the sorting algorithm being used.
+    the sorting algorithm being used. These algorithm types have not been
+    treated strictly for some time, but rather treated as stable/not stable.
+    In NumPy 2.4 they are replaced by requirements (see below), but done in a
+    backwards compatible way. These values will continue to work, except
+    that NPY_HEAPSORT will do the same thing as NPY_QUICKSORT.
 
     .. c:enumerator:: NPY_QUICKSORT
 
@@ -4335,11 +4531,32 @@ Enumerated Types
    .. c:enumerator:: NPY_NSORTS
 
-      Defined to be the number of sorts. It is fixed at three by the need for
-      backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and
-      :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one
-      of several stable sorting algorithms depending on the data type.
+       Defined to be the number of sorts. It is fixed at three by the need for
+       backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and
+       :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one
+       of several stable sorting algorithms depending on the data type.
+
+    In NumPy 2.4 the algorithm names are replaced by requirements. You can
+    still use the old values, a recompile is not needed, but they are
+    reinterpreted such that:
+
+    * NPY_QUICKSORT and NPY_HEAPSORT -> NPY_SORT_DEFAULT
+    * NPY_MERGESORT and NPY_STABLESORT -> NPY_SORT_STABLE
+
+    .. c:enumerator:: NPY_SORT_DEFAULT
+
+       The default sort for the type. For the NumPy builtin types it may be
+       stable or not, but will be ascending and sort NaN types to the end. It
+       is usually chosen for speed and/or low memory use.
+
+    .. c:enumerator:: NPY_SORT_STABLE
+
+       (Requirement) Specifies that the sort must be stable.
+
+    .. c:enumerator:: NPY_SORT_DESCENDING
+
+       (Requirement) Specifies that the sort must be in descending order.
+       This functionality is not yet implemented for any of the NumPy types
+       and cannot yet be set from the Python interface.
 
 .. c:enum:: NPY_SCALARKIND
 
@@ -4452,5 +4669,12 @@ Enumerated Types
 
         Allow any cast, no matter what kind of data loss may occur.
 
+.. c:macro:: NPY_SAME_VALUE_CASTING
+
+    Error if any values change during a cast. Currently
+    supported only in ``ndarray.astype(... casting='same_value')``.
+
+    .. versionadded:: 2.4
+
 .. index::
    pair: ndarray; C-API
diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst
index c07abb47bc10..b2e3af4c0944 100644
--- a/doc/source/reference/c-api/coremath.rst
+++ b/doc/source/reference/c-api/coremath.rst
@@ -1,8 +1,7 @@
 NumPy core math library
 =======================
 
-The numpy core math library (``npymath``) is a first step in this direction. This
-library contains most math-related C99 functionality, which can be used on
+This library contains most math-related C99 functionality, which can be used on
 platforms where C99 is not well supported. The core math functions have the
 same API as the C99 ones, except for the ``npy_*`` prefix.
 
@@ -318,20 +317,6 @@ The generic steps to take are:
    machine. Otherwise you pick up a static library built for the wrong
    architecture.
 
-When you build with ``numpy.distutils`` (deprecated), then use this in your ``setup.py``:
-
-  .. hidden in a comment so as to be included in refguide but not rendered documentation
-   >>> import numpy.distutils.misc_util
-   >>> config = np.distutils.misc_util.Configuration(None, '', '.')
-   >>> with open('foo.c', 'w') as f: pass
-
-   >>> from numpy.distutils.misc_util import get_info
-   >>> info = get_info('npymath')
-   >>> _ = config.add_extension('foo', sources=['foo.c'], extra_info=info)
-
-In other words, the usage of ``info`` is exactly the same as when using
-``blas_info`` and co.
-
 When you are building with `Meson `__, use::
 
     # Note that this will get easier in the future, when Meson has
diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst
index 43869d5b4c55..f6b2289ba18a 100644
--- a/doc/source/reference/c-api/dtype.rst
+++ b/doc/source/reference/c-api/dtype.rst
@@ -502,3 +502,4 @@ format specifier in printf and related commands.
 .. c:macro:: NPY_UINTP_FMT
 
 .. c:macro:: NPY_LONGDOUBLE_FMT
+
diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst
index b4750688b5e6..b8a37e98b81e 100644
--- a/doc/source/reference/c-api/generalized-ufuncs.rst
+++ b/doc/source/reference/c-api/generalized-ufuncs.rst
@@ -4,6 +4,8 @@
 Generalized universal function API
 ==================================
 
+.. seealso:: :ref:`ufuncs`
+
 There is a general need for looping over not only functions on scalars
 but also over functions on vectors (or arrays).
 This concept is realized in NumPy by generalizing the universal functions
@@ -239,13 +241,14 @@ In this case, the ufunc author might define the function like this:
 
 .. code-block:: c
 
-    int minmax_process_core_dims(PyUFuncObject ufunc,
+    int minmax_process_core_dims(PyUFuncObject *ufunc,
                                  npy_intp *core_dim_sizes)
     {
         npy_intp n = core_dim_sizes[0];
         if (n == 0) {
-            PyExc_SetString("minmax requires the core dimension "
-                            "to be at least 1.");
+            PyErr_SetString(PyExc_ValueError,
+                            "minmax requires the core dimension to "
+                            "be at least 1.");
             return -1;
         }
         return 0;
@@ -267,7 +270,7 @@ dimension size will result in an exception being raised. With the
 can set the output size to whatever is appropriate for the ufunc.
 
 In the array passed to the "hook" function, core dimensions that
-were not determined by the input are indicating by having the value -1
+were not determined by the input are indicated by having the value -1
 in the ``core_dim_sizes`` array. The function can replace the -1 with
 whatever value is appropriate for the ufunc, based on the core
 dimensions that occurred in the input arrays.
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 1790c4f4d04d..a039af130860 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -365,7 +365,7 @@ PyArrayDescr_Type and PyArray_Descr
     places an item of this type: ``offsetof(struct {char c; type v;}, v)``
 
-    See `PyDataType_ALIGNMENT` for a way to access this field in a NumPy 1.x
+    See :c:func:`PyDataType_ALIGNMENT` for a way to access this field in a NumPy 1.x
     compatible way.
 
 .. c:member:: PyObject *metadata
@@ -728,6 +728,7 @@ PyArrayMethod_Context and PyArrayMethod_Spec
         PyObject *caller;
         struct PyArrayMethodObject_tag *method;
         PyArray_Descr *const *descriptors;
+        void *parameters;
     } PyArrayMethod_Context
 
     .. c:member:: PyObject *caller
@@ -744,6 +745,15 @@ PyArrayMethod_Context and PyArrayMethod_Spec
        An array of descriptors for the ufunc loop, filled in by
        ``resolve_descriptors``. The length of the array is ``nin`` + ``nout``.
 
+    .. c:member:: void *parameters
+
+       A pointer to a structure containing any runtime parameters needed by the
+       loop. This is ``NULL`` if no parameters are needed. The type of the
+       struct is specific to the registered function.
+
+       .. versionadded:: 2.4
+
 .. c:type:: PyArrayMethod_Spec
 
     A struct used to register an ArrayMethod with NumPy.
We use the slots @@ -1608,7 +1618,7 @@ for completeness and assistance in understanding the code. The C-structure associated with :c:var:`PyArrayMapIter_Type`. This structure is useful if you are trying to understand the advanced-index mapping code. It is defined in the - ``arrayobject.h`` header. This type is not exposed to Python and + ``multiarray/mapping.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. @@ -1618,7 +1628,7 @@ NumPy C-API and C complex When you use the NumPy C-API, you will have access to complex real declarations ``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C standard types from ``complex.h``. Unfortunately, ``complex.h`` contains -`#define I ...`` (where the actual definition depends on the compiler), which +``#define I ...`` (where the actual definition depends on the compiler), which means that any downstream user that does ``#include `` could get ``I`` defined, and using something like declaring ``double I;`` in their code will result in an obscure compiler error like diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst deleted file mode 100644 index 714c8836322e..000000000000 --- a/doc/source/reference/distutils.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. _numpy-distutils-refguide: - -********* -Packaging -********* - -.. module:: numpy.distutils - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - -.. warning:: - - Note that ``setuptools`` does major releases often and those may contain - changes that break :mod:`numpy.distutils`, which will *not* be updated anymore - for new ``setuptools`` versions. It is therefore recommended to set an - upper version bound in your build configuration for the last known version - of ``setuptools`` that works with your build. - -NumPy provides enhanced distutils functionality to make it easier to -build and install sub-packages, auto-generate code, and extension -modules that use Fortran-compiled libraries. A useful :class:`Configuration -` class is also provided in -:mod:`numpy.distutils.misc_util` that can make it easier to construct -keyword arguments to pass to the setup function (by passing the -dictionary obtained from the todict() method of the class). More -information is available in the :ref:`distutils-user-guide`. - -The choice and location of linked libraries such as BLAS and LAPACK as well as -include paths and other such build options can be specified in a ``site.cfg`` -file located in the NumPy root repository or a ``.numpy-site.cfg`` file in your -home directory. See the ``site.cfg.example`` example file included in the NumPy -repository or sdist for documentation. - -.. index:: - single: distutils - - -Modules in :mod:`numpy.distutils` -================================= -.. toctree:: - :maxdepth: 2 - - distutils/misc_util - - -.. currentmodule:: numpy.distutils - -.. autosummary:: - :toctree: generated/ - - ccompiler - ccompiler_opt - cpuinfo.cpu - core.Extension - exec_command - log.set_verbosity - system_info.get_info - system_info.get_standard_file - - -Configuration class -=================== - -.. currentmodule:: numpy.distutils.misc_util - -.. class:: Configuration(package_name=None, parent_name=None, top_path=None, package_path=None, **attrs) - - Construct a configuration instance for the given package name. 
If - *parent_name* is not None, then construct the package as a - sub-package of the *parent_name* package. If *top_path* and - *package_path* are None then they are assumed equal to - the path of the file this instance was created in. The setup.py - files in the numpy distribution are good examples of how to use - the :class:`Configuration` instance. - - .. automethod:: todict - - .. automethod:: get_distribution - - .. automethod:: get_subpackage - - .. automethod:: add_subpackage - - .. automethod:: add_data_files - - .. automethod:: add_data_dir - - .. automethod:: add_include_dirs - - .. automethod:: add_headers - - .. automethod:: add_extension - - .. automethod:: add_library - - .. automethod:: add_scripts - - .. automethod:: add_installed_library - - .. automethod:: add_npy_pkg_config - - .. automethod:: paths - - .. automethod:: get_config_cmd - - .. automethod:: get_build_temp_dir - - .. automethod:: have_f77c - - .. automethod:: have_f90c - - .. automethod:: get_version - - .. automethod:: make_svn_version_py - - .. automethod:: make_config_py - - .. automethod:: get_info - -Building installable C libraries -================================ - -Conventional C libraries (installed through `add_library`) are not installed, and -are just used during the build (they are statically linked). An installable C -library is a pure C library, which does not depend on the python C runtime, and -is installed such that it may be used by third-party packages. To build and -install the C library, you just use the method `add_installed_library` instead of -`add_library`, which takes the same arguments except for an additional -``install_dir`` argument:: - - .. hidden in a comment so as to be included in refguide but not rendered documentation - >>> import numpy.distutils.misc_util - >>> config = np.distutils.misc_util.Configuration(None, '', '.') - >>> with open('foo.c', 'w') as f: pass - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - -npy-pkg-config files --------------------- - -To make the necessary build options available to third parties, you could use -the `npy-pkg-config` mechanism implemented in `numpy.distutils`. This mechanism is -based on a .ini file which contains all the options. A .ini file is very -similar to .pc files as used by the pkg-config unix utility:: - - [meta] - Name: foo - Version: 1.0 - Description: foo library - - [variables] - prefix = /home/user/local - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -Generally, the file needs to be generated during the build, since it needs some -information known at build time only (e.g. prefix). This is mostly automatic if -one uses the `Configuration` method `add_npy_pkg_config`. Assuming we have a -template file foo.ini.in as follows:: - - [meta] - Name: foo - Version: @version@ - Description: foo library - - [variables] - prefix = @prefix@ - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -and the following code in setup.py:: - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - >>> subst = {'version': '1.0'} - >>> config.add_npy_pkg_config('foo.ini.in', 'lib', subst_dict=subst) - -This will install the file foo.ini into the directory package_dir/lib, and the -foo.ini file will be generated from foo.ini.in, where each ``@version@`` will be -replaced by ``subst_dict['version']``. 
The dictionary has an additional prefix -substitution rule automatically added, which contains the install prefix (since -this is not easy to get from setup.py). - -Reusing a C library from another package ----------------------------------------- - -Info are easily retrieved from the `get_info` function in -`numpy.distutils.misc_util`:: - - >>> info = np.distutils.misc_util.get_info('npymath') - >>> config.add_extension('foo', sources=['foo.c'], extra_info=info) - - - -An additional list of paths to look for .ini files can be given to `get_info`. - -Conversion of ``.src`` files -============================ - -NumPy distutils supports automatic conversion of source files named -.src. This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. See :ref:`templating`. diff --git a/doc/source/reference/distutils/misc_util.rst b/doc/source/reference/distutils/misc_util.rst deleted file mode 100644 index bbb83a5ab061..000000000000 --- a/doc/source/reference/distutils/misc_util.rst +++ /dev/null @@ -1,7 +0,0 @@ -distutils.misc_util -=================== - -.. automodule:: numpy.distutils.misc_util - :members: - :undoc-members: - :exclude-members: Configuration diff --git a/doc/source/reference/distutils_guide.rst b/doc/source/reference/distutils_guide.rst deleted file mode 100644 index 0a815797ac30..000000000000 --- a/doc/source/reference/distutils_guide.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _distutils-user-guide: - -``numpy.distutils`` user guide -============================== - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - - -.. include:: ../../DISTUTILS.rst - :start-line: 6 diff --git a/doc/source/reference/distutils_status_migration.rst b/doc/source/reference/distutils_status_migration.rst index 366b0e67f06a..e4ca4fedcf81 100644 --- a/doc/source/reference/distutils_status_migration.rst +++ b/doc/source/reference/distutils_status_migration.rst @@ -3,16 +3,7 @@ Status of ``numpy.distutils`` and migration advice ================================================== -`numpy.distutils` has been deprecated in NumPy ``1.23.0``. It will be removed -for Python 3.12; for Python <= 3.11 it will not be removed until 2 years after -the Python 3.12 release (Oct 2025). - - -.. warning:: - - ``numpy.distutils`` is only tested with ``setuptools < 60.0``, newer - versions may break. See :ref:`numpy-setuptools-interaction` for details. - +``numpy.distutils`` was removed in NumPy ``2.5.0``. Migration advice ---------------- @@ -27,7 +18,7 @@ using a well-designed, modern and reliable build system, we recommend: If you have modest needs (only simple Cython/C extensions; no need for Fortran, BLAS/LAPACK, nested ``setup.py`` files, or other features of -``numpy.distutils``) and have been happy with ``numpy.distutils`` so far, you +``numpy.distutils``) and have been happy with ``numpy.distutils``, you can also consider switching to ``setuptools``. Note that most functionality of ``numpy.distutils`` is unlikely to be ported to ``setuptools``. 
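+
+If you do switch to ``setuptools``, a minimal single-extension ``setup.py``
+could look like the following sketch. The project name ``foo`` and the C
+source file are placeholders, and only :func:`numpy.get_include` is assumed
+from NumPy itself::
+
+    # setup.py -- a minimal setuptools build for one C extension
+    from setuptools import Extension, setup
+    import numpy as np
+
+    setup(
+        name="foo",                   # placeholder project name
+        version="1.0",
+        ext_modules=[
+            Extension(
+                "foo",                # import name of the compiled module
+                sources=["foo.c"],    # placeholder C source file
+                include_dirs=[np.get_include()],  # NumPy C API headers
+            )
+        ],
+    )
+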
@@ -47,7 +38,7 @@ migrating. For more details about the SciPy migration, see: - `RFC: switch to Meson as a build system `__ - `Tracking issue for Meson support `__ -NumPy will migrate to Meson for the 1.26 release. +NumPy migrated to Meson for the 1.26 release. Moving to CMake / scikit-build @@ -73,15 +64,12 @@ present in ``setuptools``: - Support for a few other scientific libraries, like FFTW and UMFPACK - Better MinGW support - Per-compiler build flag customization (e.g. `-O3` and `SSE2` flags are default) -- a simple user build config system, see `site.cfg.example `__ +- a simple user build config system, see `site.cfg.example `__ - SIMD intrinsics support - Support for the NumPy-specific ``.src`` templating format for ``.c``/``.h`` files -The most widely used feature is nested ``setup.py`` files. This feature may -perhaps still be ported to ``setuptools`` in the future (it needs a volunteer -though, see `gh-18588 `__ for -status). Projects only using that feature could move to ``setuptools`` after -that is done. In case a project uses only a couple of ``setup.py`` files, it +The most widely used feature is nested ``setup.py`` files. In case a project +uses only a couple of ``setup.py`` files, it also could make sense to simply aggregate all the content of those files into a single ``setup.py`` file and then move to ``setuptools``. This involves dropping all ``Configuration`` instances, and using ``Extension`` instead. @@ -100,29 +88,6 @@ E.g.,:: For more details, see the `setuptools documentation `__ - -.. _numpy-setuptools-interaction: - -Interaction of ``numpy.distutils`` with ``setuptools`` ------------------------------------------------------- - -It is recommended to use ``setuptools < 60.0``. Newer versions may work, but -are not guaranteed to. The reason for this is that ``setuptools`` 60.0 enabled -a vendored copy of ``distutils``, including backwards incompatible changes that -affect some functionality in ``numpy.distutils``. - -If you are using only simple Cython or C extensions with minimal use of -``numpy.distutils`` functionality beyond nested ``setup.py`` files (its most -popular feature, see :class:`Configuration `), -then latest ``setuptools`` is likely to continue working. In case of problems, -you can also try ``SETUPTOOLS_USE_DISTUTILS=stdlib`` to avoid the backwards -incompatible changes in ``setuptools``. - -Whatever you do, it is recommended to put an upper bound on your ``setuptools`` -build requirement in ``pyproject.toml`` to avoid future breakage - see -:ref:`for-downstream-package-authors`. - - .. _CMake: https://cmake.org/ .. _Meson: https://mesonbuild.com/ .. _meson-python: https://meson-python.readthedocs.io diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index 02e3248953fb..2a7ac83a96ca 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -40,7 +40,6 @@ Python API :maxdepth: 1 typing - distutils C API ===== @@ -61,8 +60,8 @@ Other topics thread_safety global_state security + testing distutils_status_migration - distutils_guide swig diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 9c44ebcbc589..4f53c6146b53 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -20,8 +20,8 @@ What is a masked array? ----------------------- In many circumstances, datasets can be incomplete or tainted by the presence -of invalid data. 
For example, a sensor may have failed to record a data, or
-recorded an invalid value. The :mod:`numpy.ma` module provides a convenient
+of invalid data. For example, a sensor may have failed to record a data point,
+or recorded an invalid value. The :mod:`numpy.ma` module provides a convenient
 way to address this issue, by introducing masked arrays.

 A masked array is the combination of a standard :class:`numpy.ndarray` and a
@@ -66,7 +66,7 @@ attributes and methods are described in more details in the

 .. try_examples::

-The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`:
+    The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`:

     >>> import numpy as np
     >>> import numpy.ma as ma
@@ -521,7 +521,7 @@ Numerical operations
 --------------------

 Numerical operations can be easily performed without worrying about missing
-values, dividing by zero, square roots of negative numbers, etc.::
+values, dividing by zero, square roots of negative numbers, etc.:

 .. try_examples::

diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst
index 01a5bcff7fbc..5c6d8139b055 100644
--- a/doc/source/reference/module_structure.rst
+++ b/doc/source/reference/module_structure.rst
@@ -5,7 +5,7 @@ NumPy's module structure
 ************************

 NumPy has a large number of submodules. Most regular usage of NumPy requires
-only the main namespace and a smaller set of submodules. The rest either either
+only the main namespace and a smaller set of submodules. The rest either
 have special-purpose or niche namespaces.

 Main namespaces
@@ -44,7 +44,6 @@ Prefer not to use these namespaces for new code. There are better alternatives
 and/or this code is deprecated or isn't reliable.

 - :ref:`numpy.char ` - legacy string functionality, only for fixed-width strings
-- :ref:`numpy.distutils ` (deprecated) - build system support
 - :ref:`numpy.f2py ` - Fortran binding generation (usually used from the command line only)
 - :ref:`numpy.ma ` - masked arrays (not very reliable, needs an overhaul)
 - :ref:`numpy.matlib ` (pending deprecation) - functions supporting ``matrix`` instances
@@ -70,7 +69,6 @@ and/or this code is deprecated or isn't reliable.
    numpy.rec
    numpy.version
    numpy.char
-   numpy.distutils
    numpy.f2py <../f2py/index>
    numpy.ma
    numpy.matlib

diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst
index 17c6a515cdbc..28e045f10dc0 100644
--- a/doc/source/reference/random/multithreading.rst
+++ b/doc/source/reference/random/multithreading.rst
@@ -9,7 +9,10 @@ well-behaved (writable and aligned). Under normal circumstances, arrays created
 using the common constructors such as :meth:`numpy.empty` will satisfy these
 requirements.

-This example makes use of:mod:`concurrent.futures` to fill an array using
+.. seealso::
+    :ref:`thread_safety` for general information about thread safety in NumPy.
+
+This example makes use of :mod:`concurrent.futures` to fill an array using
 multiple threads. Threads are long-lived so that repeated calls do not require
 any additional overheads from thread creation.
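+
+A condensed sketch of that pattern follows; it is not the documentation's
+full example, and the thread count, array size, and seed are arbitrary::
+
+    import concurrent.futures
+    import numpy as np
+
+    def fill(rng, out, first, last):
+        # each worker writes to its own non-overlapping slice
+        out[first:last] = rng.standard_normal(last - first)
+
+    n = 1_000_000
+    n_threads = 4
+    step = n // n_threads
+    values = np.empty(n)
+
+    # independent bit generators, one per thread
+    seeds = np.random.SeedSequence(12345).spawn(n_threads)
+    rngs = [np.random.default_rng(s) for s in seeds]
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=n_threads) as executor:
+        futures = [executor.submit(fill, rngs[i], values, i * step, (i + 1) * step)
+                   for i in range(n_threads)]
+        concurrent.futures.wait(futures)
+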
diff --git a/doc/source/reference/routines.err.rst b/doc/source/reference/routines.err.rst
index 5272073a3b00..f46634793fa3 100644
--- a/doc/source/reference/routines.err.rst
+++ b/doc/source/reference/routines.err.rst
@@ -1,8 +1,80 @@
+.. _fp_error_handling:
+
 Floating point error handling
 =============================

 .. currentmodule:: numpy

+Error handling settings are stored in :py:mod:`python:contextvars`,
+allowing different threads or async tasks to have independent configurations.
+For more information, see :ref:`thread_safety`.
+
+.. _misc-error-handling:
+
+How numpy handles numerical exceptions
+--------------------------------------
+
+The default is ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
+and ``'ignore'`` for ``underflow``. This can be changed, and it can be
+set individually for different kinds of exceptions. The different behaviors
+are:
+
+- ``'ignore'`` : Take no action when the exception occurs.
+- ``'warn'`` : Print a :py:exc:`RuntimeWarning` (via the Python :py:mod:`warnings` module).
+- ``'raise'`` : Raise a :py:exc:`FloatingPointError`.
+- ``'call'`` : Call a specified function.
+- ``'print'`` : Print a warning directly to ``stdout``.
+- ``'log'`` : Record error in a Log object.
+
+These behaviors can be set for all kinds of errors or specific ones:
+
+- ``all`` : apply to all numeric exceptions
+- ``invalid`` : when NaNs are generated
+- ``divide`` : divide by zero (for integers as well!)
+- ``overflow`` : floating point overflows
+- ``underflow`` : floating point underflows
+
+Note that integer divide-by-zero is handled by the same machinery.
+
+The error handling mode can be configured with the :func:`numpy.errstate`
+context manager.
+
+Examples
+--------
+
+::
+
+    >>> with np.errstate(all='warn'):
+    ...     np.zeros(5, dtype=np.float32) / 0.0
+    <stdin>:2: RuntimeWarning: invalid value encountered in divide
+    array([nan, nan, nan, nan, nan], dtype=float32)
+
+::
+
+    >>> with np.errstate(under='ignore'):
+    ...     np.array([1.e-100])**10
+    array([0.])
+
+::
+
+    >>> with np.errstate(invalid='raise'):
+    ...     np.sqrt(np.array([-1.]))
+    ...
+    Traceback (most recent call last):
+      File "<stdin>", line 2, in <module>
+        np.sqrt(np.array([-1.]))
+        ~~~~~~~^^^^^^^^^^^^^^^^^
+    FloatingPointError: invalid value encountered in sqrt
+
+::
+
+    >>> def errorhandler(errstr, errflag):
+    ...     print("saw stupid error!")
+    >>> with np.errstate(call=errorhandler, all='call'):
+    ...     np.zeros(5, dtype=np.int32) / 0
+    saw stupid error!
+    array([nan, nan, nan, nan, nan])
+
 Setting and getting error handling
 ----------------------------------

diff --git a/doc/source/reference/routines.io.rst b/doc/source/reference/routines.io.rst
index 2b8dd98f36a4..ccd4467af545 100644
--- a/doc/source/reference/routines.io.rst
+++ b/doc/source/reference/routines.io.rst
@@ -59,8 +59,15 @@ Memory mapping files
    memmap
    lib.format.open_memmap

+.. _text_formatting_options:
+
 Text formatting options
 -----------------------
+
+Text formatting settings are maintained in a :py:mod:`context variable <python:contextvars>`,
+allowing different threads or async tasks to have independent configurations.
+For more information, see :ref:`thread_safety`.
+
 ..
autosummary:: :toctree: generated/ diff --git a/doc/source/reference/routines.polynomials.rst b/doc/source/reference/routines.polynomials.rst index 0763a1cf719a..00b4460eae21 100644 --- a/doc/source/reference/routines.polynomials.rst +++ b/doc/source/reference/routines.polynomials.rst @@ -47,23 +47,28 @@ The `~numpy.polynomial.polynomial.Polynomial` class is imported for brevity:: from numpy.polynomial import Polynomial -+------------------------+------------------------------+---------------------------------------+ -| **How to...** | Legacy (`numpy.poly1d`) | `numpy.polynomial` | -+------------------------+------------------------------+---------------------------------------+ -| Create a | ``p = np.poly1d([1, 2, 3])`` | ``p = Polynomial([3, 2, 1])`` | -| polynomial object | | | -| from coefficients [1]_ | | | -+------------------------+------------------------------+---------------------------------------+ -| Create a polynomial | ``r = np.poly([-1, 1])`` | ``p = Polynomial.fromroots([-1, 1])`` | -| object from roots | ``p = np.poly1d(r)`` | | -+------------------------+------------------------------+---------------------------------------+ -| Fit a polynomial of | | | -| degree ``deg`` to data | ``np.polyfit(x, y, deg)`` | ``Polynomial.fit(x, y, deg)`` | -+------------------------+------------------------------+---------------------------------------+ - ++------------------------+----------------------------------------+---------------------------------------+ +| **How to...** | Legacy (`numpy.poly1d`) | `numpy.polynomial` | ++------------------------+----------------------------------------+---------------------------------------+ +| Create a | ``p = np.poly1d([1, 2, 3])`` | ``p = Polynomial([3, 2, 1])`` | +| polynomial object | | | +| from coefficients [1]_ | | | ++------------------------+----------------------------------------+---------------------------------------+ +| Create a polynomial | ``r = np.poly([-1, 1])`` | ``p = Polynomial.fromroots([-1, 1])`` | +| object from roots | ``p = np.poly1d(r)`` | | ++------------------------+----------------------------------------+---------------------------------------+ +| Fit a polynomial of | | | +| degree ``deg`` to data | ``np.polyfit(x, y, deg)`` | ``Polynomial.fit(x, y, deg)`` | ++------------------------+----------------------------------------+---------------------------------------+ +| Evaluate a polynomial | ``p(2.0)`` or | ``p(2.0)`` or ``polyval(2.0, p.coef)``| +| at a point [2]_ | ``np.polyval([1, 2, 3], 2.0)`` | (use ``p.convert().coef`` after fit) | ++------------------------+----------------------------------------+---------------------------------------+ .. [1] Note the reversed ordering of the coefficients +.. [2] When evaluating polynomials created with ``fit()``, use ``p(x)`` or + ``polyval(x, p.convert().coef)`` to handle domain/window scaling correctly. + Transition Guide ~~~~~~~~~~~~~~~~ @@ -188,3 +193,4 @@ Documentation for legacy polynomials :maxdepth: 2 routines.polynomials.poly1d + \ No newline at end of file diff --git a/doc/source/reference/routines.set.rst b/doc/source/reference/routines.set.rst index fbb5afdc1b75..47080f96fff8 100644 --- a/doc/source/reference/routines.set.rst +++ b/doc/source/reference/routines.set.rst @@ -19,7 +19,6 @@ Boolean operations .. 
autosummary::
   :toctree: generated/

-   in1d
    intersect1d
    isin
    setdiff1d

diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst
index 524a1532ca57..229a9ebbae0a 100644
--- a/doc/source/reference/simd/build-options.rst
+++ b/doc/source/reference/simd/build-options.rst
@@ -1,222 +1,394 @@
 *****************
-CPU build options
+CPU Build Options
 *****************

-Description
------------
+Overview
+--------

-The following options are mainly used to change the default behavior of optimizations
-that target certain CPU features:
+NumPy provides configuration options to optimize performance based on CPU capabilities.
+These options allow you to specify which CPU features to support, balancing performance,
+compatibility, and binary size. This document explains how to use these options
+effectively across various CPU architectures.

-- ``cpu-baseline``: minimal set of required CPU features.
-  Default value is ``min`` which provides the minimum CPU features that can
-  safely run on a wide range of platforms within the processor family.
+Key Configuration Options
+-------------------------

-  .. note::
+NumPy uses several build options to control CPU optimizations:

-     During the runtime, NumPy modules will fail to load if any of specified features
-     are not supported by the target CPU (raises Python runtime error).
+- ``cpu-baseline``: The minimum set of CPU features required to run the compiled NumPy.
+
+  * Default: ``min`` (provides compatibility across a wide range of platforms)
+  * If your target CPU doesn't support all specified baseline features, NumPy will
+    fail to load with a Python runtime error

-- ``cpu-dispatch``: dispatched set of additional CPU features.
-  Default value is ``max -xop -fma4`` which enables all CPU
-  features, except for AMD legacy features (in case of X86).
+- ``cpu-baseline-detect``: controls detection of the CPU baseline based on compiler
+  flags. The default value is ``auto``, which enables detection when ``-march=``
+  or a similar compiler flag is used. The other possible values are ``enabled``
+  and ``disabled``, which respectively enable or disable it unconditionally.

-  .. note::
+- ``cpu-dispatch``: Additional CPU features for which optimized code paths will be generated.
+
+  * Default: ``max`` (enables all available optimizations)
+  * At runtime, NumPy will automatically select the fastest available code path
+    based on your CPU's capabilities

-     During the runtime, NumPy modules will skip any specified features
-     that are not available in the target CPU.
+- ``disable-optimization``: Completely disables all CPU optimizations.
+
+  * Default: ``false`` (optimizations are enabled)
+  * When set to ``true``, disables all CPU optimized code including dispatch, SIMD,
+    and loop unrolling
+  * Useful for debugging, testing, or in environments where optimization causes issues

-These options are accessible at build time by passing setup arguments to meson-python
-via the build frontend (e.g., ``pip`` or ``build``).
-They accept a set of :ref:`CPU features <opt-supported-features>`
-or groups of features that gather several features or
-:ref:`special options <opt-special-options>` that
-perform a series of procedures.
+These options are specified at build time via meson-python arguments::

+    pip install . -Csetup-args=-Dcpu-baseline="min" -Csetup-args=-Dcpu-dispatch="max"
+
+    # or through spin
+    spin build -- -Dcpu-baseline="min" -Dcpu-dispatch="max"

-To customize CPU/build options::
-
-    pip install . -Csetup-args=-Dcpu-baseline="avx2 fma3" -Csetup-args=-Dcpu-dispatch="max"
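+After building, you can verify which features actually ended up enabled.
+As a sketch (the exact output format varies by platform and NumPy version),
+:func:`numpy.show_config` reports the baseline and dispatched SIMD extensions
+recorded at build time, and :func:`numpy.show_runtime` reports what the
+running CPU supports::
+
+    import numpy as np
+
+    # Build-time view: baseline and dispatched SIMD extensions of this binary.
+    np.show_config()
+
+    # Runtime view: which of those features the current CPU supports.
+    np.show_runtime()
+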
-Csetup-args=-Dcpu-baseline="avx2 fma3" -Csetup-args=-Dcpu-dispatch="max" +``cpu-baseline`` and ``cpu-dispatch`` can be set to specific :ref:`CPU groups, features`, or :ref:`special options ` +that perform specific actions. The following sections describe these options in detail. -Quick start ------------ +Common Usage Scenarios +---------------------- -In general, the default settings tend to not impose certain CPU features that -may not be available on some older processors. Raising the ceiling of the -baseline features will often improve performance and may also reduce -binary size. +Building for Local Use Only +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +When building for your machine only and not planning to distribute:: -The following are the most common scenarios that may require changing -the default settings: + python -m build --wheel -Csetup-args=-Dcpu-baseline="native" -Csetup-args=-Dcpu-dispatch="none" +This automatically detects and uses all CPU features available on your machine. -I am building NumPy for my local use -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -And I do not intend to export the build to other users or target a -different CPU than what the host has. +.. note:: + A fatal error will be raised if ``native`` isn't supported by the host platform. -Set ``native`` for baseline, or manually specify the CPU features in case of option -``native`` isn't supported by your platform:: +Excluding Specific Features +~~~~~~~~~~~~~~~~~~~~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-baseline="native" +You may want to exclude certain CPU features from the dispatched features:: -Building NumPy with extra CPU features isn't necessary for this case, -since all supported features are already defined within the baseline features:: + # For x86-64: exclude all AVX-512 features + python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -X86_V4" - python -m build --wheel -Csetup-args=-Dcpu-baseline="native" \ - -Csetup-args=-Dcpu-dispatch="none" + # For ARM64: exclude SVE + python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -SVE" .. note:: + Excluding a feature will also exclude any successor features that are + implied by the excluded feature. For example, excluding ``X86_V4`` will + exclude ``AVX512_ICL`` and ``AVX512_SPR`` as well. + +Targeting Older CPUs +~~~~~~~~~~~~~~~~~~~~ + +On ``x86-64``, by default the baseline is set to ``min`` which maps to ``X86_V2``. +This unsuitable for older CPUs (before 2009) or old virtual machines. +To address this, set the baseline to ``none``:: + + python -m build --wheel -Csetup-args=-Dcpu-baseline="none" + +This will create a build that is compatible with all x86 CPUs, but +without any manual optimizations or SIMD code paths for the baseline. +The build will rely only on dispatched code paths for optimization. - A fatal error will be raised if ``native`` isn't supported by the host platform. +Targeting Newer CPUs +~~~~~~~~~~~~~~~~~~~~ -I do not want to support the old processors of the x86 architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Raising the baseline improves performance for two main reasons: -Since most of the CPUs nowadays support at least ``AVX``, ``F16C`` features, you can use:: +1. Dispatched kernels don't cover all code paths +2. 
A higher baseline leads to smaller binary size as the compiler won't generate code paths for excluded dispatched features - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx f16c" +For CPUs from 2015 and newer, setting the baseline to ``X86_V3`` may be suitable:: + + python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V3" + +.. _opt-supported-features: + +Supported CPU Features By Architecture +-------------------------------------- + +NumPy supports optimized code paths for multiple CPU architectures. Below are the supported feature groups for each architecture. +The name of the feature group can be used in the build options ``cpu-baseline`` and ``cpu-dispatch``. + +X86 +~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + - Includes + * - ``X86_V2`` + - + - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` + * - ``X86_V3`` + - ``X86_V2`` + - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` + * - ``X86_V4`` + - ``X86_V3`` + - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` + * - ``AVX512_ICL`` + - ``X86_V4`` + - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` + * - ``AVX512_SPR`` + - ``AVX512_ICL`` + - ``AVX512FP16`` + +These groups correspond to CPU generations: + +- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) +- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) +- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) +- ``AVX512_ICL``: Intel Ice Lake and similar CPUs +- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs .. note:: + On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. + +On IBM/POWER big-endian +~~~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VSX`` + - + * - ``VSX2`` + - ``VSX`` + * - ``VSX3`` + - ``VSX`` ``VSX2`` + * - ``VSX4`` + - ``VSX`` ``VSX2`` ``VSX3`` + +On IBM/POWER little-endian +~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VSX`` + - ``VSX2`` + * - ``VSX2`` + - ``VSX`` + * - ``VSX3`` + - ``VSX`` ``VSX2`` + * - ``VSX4`` + - ``VSX`` ``VSX2`` ``VSX3`` + +On ARMv7/A32 +~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``NEON`` + - + * - ``NEON_FP16`` + - ``NEON`` + * - ``NEON_VFPV4`` + - ``NEON`` ``NEON_FP16`` + * - ``ASIMD`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` + * - ``ASIMDHP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDDP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDFHM`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` + +On ARMv8/A64 +~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``NEON`` + - ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``NEON_FP16`` + - ``NEON`` ``NEON_VFPV4`` ``ASIMD`` + * - ``NEON_VFPV4`` + - ``NEON`` ``NEON_FP16`` ``ASIMD`` + * - ``ASIMD`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` + * - ``ASIMDHP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDDP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDFHM`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` + +On IBM/ZSYSTEM(S390X) +~~~~~~~~~~~~~~~~~~~~~ +.. 
list-table::
   :header-rows: 1
   :align: left
+
+   * - Name
+     - Implies
+   * - ``VX``
+     -
+   * - ``VXE``
+     - ``VX``
+   * - ``VXE2``
+     - ``VX`` ``VXE``
+
+On RISCV64
+~~~~~~~~~~~~~~~~~~~~~
+.. list-table::
+   :header-rows: 1
+   :align: left
+
+   * - Name
+     - Implies
+   * - ``RVV``
+     -
+
+.. _opt-special-options:
-  ``cpu-baseline`` force combine all implied features, so there's no need
+Special Options
+---------------
+Beyond specific feature names, you can use these special values:
-  to add SSE features.
+``NONE``
+~~~~~~~~
-I'm facing the same case above but with ppc64 architecture
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Enables no features (equivalent to an empty string).
-Then raise the ceiling of the baseline features to Power8::
+``NATIVE``
+~~~~~~~~~~
-    python -m build --wheel -Csetup-args=-Dcpu-baseline="vsx2"
+Enables all features supported by the host CPU.
-Having issues with AVX512 features?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``DETECT``
+~~~~~~~~~~
-You may have some reservations about including of ``AVX512`` or
-any other CPU feature and you want to exclude from the dispatched features::
+Detects the features enabled by the compiler. This option is appended by default
+to ``cpu-baseline`` if ``-march``, ``-mcpu``, ``-xHost``, or ``/QxHost`` is set in
-    python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -avx512f -avx512cd \
-    -avx512_knl -avx512_knm -avx512_skx -avx512_clx -avx512_cnl -avx512_icl -avx512_spr"
+the environment variable ``CFLAGS`` unless ``cpu-baseline-detect`` is ``disabled``.

-.. _opt-supported-features:
+``MIN``
+~~~~~~~
-Supported features
-------------------
+Enables the minimum CPU features for each architecture:

-The names of the features can express one feature or a group of features,
-as shown in the following tables supported depend on the lowest interest:
+.. list-table::
+   :header-rows: 1
+   :align: left

-.. note::
+   * - For Arch
+     - Implies
+   * - x86 (32-bit)
+     - ``X86_V2``
+   * - x86-64
+     - ``X86_V2``
+   * - IBM/POWER (big-endian)
+     - ``NONE``
+   * - IBM/POWER (little-endian)
+     - ``VSX`` ``VSX2``
+   * - ARMv7/ARMHF
+     - ``NONE``
+   * - ARMv8/AArch64
+     - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD``
+   * - IBM/ZSYSTEM(S390X)
+     - ``NONE``
+   * - riscv64
+     - ``NONE``

-   The following features may not be supported by all compilers,
-   also some compilers may produce different set of implied features
-   when it comes to features like ``AVX512``, ``AVX2``, and ``FMA3``.
-   See :ref:`opt-platform-differences` for more details.

-.. include:: generated_tables/cpu_features.inc
+``MAX``
+~~~~~~~

-.. _opt-special-options:
+Enables all features supported by the compiler and platform.

-Special options
----------------
+Operators (``-``/``+``)
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Remove or add specific features, useful with ``MAX``, ``MIN``, and ``NATIVE``:

-- ``NONE``: enable no features.
+- Adding a feature (``+``) includes all implied features
+- Removing a feature (``-``) excludes all successor features that imply the removed feature

-- ``NATIVE``: Enables all CPU features that supported by the host CPU,
-  this operation is based on the compiler flags (``-march=native``, ``-xHost``, ``/QxHost``)
+Examples::

-- ``MIN``: Enables the minimum CPU features that can safely run on a wide range of platforms:
+    python -m build --wheel -Csetup-args=-Dcpu-dispatch="max-X86_V4"
+    python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V4"

-    ..
table:: - :align: left +Usage And Behaviors +------------------- - ====================================== ======================================= - For Arch Implies - ====================================== ======================================= - x86 (32-bit mode) ``SSE`` ``SSE2`` - x86_64 ``SSE`` ``SSE2`` ``SSE3`` - IBM/POWER (big-endian mode) ``NONE`` - IBM/POWER (little-endian mode) ``VSX`` ``VSX2`` - ARMHF ``NONE`` - ARM64 A.K. AARCH64 ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMD`` - IBM/ZSYSTEM(S390X) ``NONE`` - ====================================== ======================================= +Case Insensitivity +~~~~~~~~~~~~~~~~~~ -- ``MAX``: Enables all supported CPU features by the compiler and platform. +CPU features and options are case-insensitive:: -- ``Operators-/+``: remove or add features, useful with options ``MAX``, ``MIN`` and ``NATIVE``. + python -m build --wheel -Csetup-args=-Dcpu-dispatch="X86_v4" -Behaviors ---------- +Mixing Features across Architectures +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- CPU features and other options are case-insensitive, for example:: +You can mix features from different architectures:: - python -m build --wheel -Csetup-args=-Dcpu-dispatch="SSE41 avx2 FMA3" + python -m build --wheel -Csetup-args=-Dcpu-baseline="X86_V4 VSX4 SVE" -- The order of the requested optimizations doesn't matter:: +Order Independence +~~~~~~~~~~~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-dispatch="SSE41 AVX2 FMA3" - # equivalent to - python -m build --wheel -Csetup-args=-Dcpu-dispatch="FMA3 AVX2 SSE41" +The order of specified features doesn't matter:: -- Either commas or spaces or '+' can be used as a separator, - for example:: + python -m build --wheel -Csetup-args=-Dcpu-dispatch="SVE X86_V4 x86_v3" - python -m build --wheel -Csetup-args=-Dcpu-dispatch="avx2 avx512f" - # or - python -m build --wheel -Csetup-args=-Dcpu-dispatch=avx2,avx512f - # or - python -m build --wheel -Csetup-args=-Dcpu-dispatch="avx2+avx512f" +Separators +~~~~~~~~~~ - all works but arguments should be enclosed in quotes or escaped - by backslash if any spaces are used. +You can use spaces or commas as separators:: -- ``cpu-baseline`` combines all implied CPU features, for example:: + # All of these are equivalent + python -m build --wheel -Csetup-args=-Dcpu-dispatch="X86_V2 X86_V4" + python -m build --wheel -Csetup-args=-Dcpu-dispatch=X86_V2,X86_V4 - python -m build --wheel -Csetup-args=-Dcpu-baseline=sse42 - # equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="sse sse2 sse3 ssse3 sse41 popcnt sse42" +Feature Combination +~~~~~~~~~~~~~~~~~~~ -- ``cpu-baseline`` will be treated as "native" if compiler native flag - ``-march=native`` or ``-xHost`` or ``/QxHost`` is enabled through environment variable - ``CFLAGS``:: +Features specified in options are automatically combined with all implied features:: - export CFLAGS="-march=native" - pip install . - # is equivalent to - pip install . -Csetup-args=-Dcpu-baseline=native + python -m build --wheel -Csetup-args=-Dcpu-baseline=X86_V4 -- ``cpu-baseline`` escapes any specified features that aren't supported - by the target platform or compiler rather than raising fatal errors. +Equivalent to:: - .. note:: + python -m build --wheel -Csetup-args=-Dcpu-baseline="X86_V2 X86_V3 X86_V4" - Since ``cpu-baseline`` combines all implied features, the maximum - supported of implied features will be enabled rather than escape all of them. 
- For example:: +Baseline Overlapping +~~~~~~~~~~~~~~~~~~~~ - # Requesting `AVX2,FMA3` but the compiler only support **SSE** features - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx2 fma3" - # is equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="sse sse2 sse3 ssse3 sse41 popcnt sse42" +Features specified in ``cpu-baseline`` will be excluded from the ``cpu-dispatch`` features, +along with their implied features, but without excluding successor features that imply them. -- ``cpu-dispatch`` does not combine any of implied CPU features, - so you must add them unless you want to disable one or all of them:: +For instance, if you specify ``cpu-baseline="X86_V4"``, it will exclude ``X86_V4`` and its +implied features ``X86_V2`` and ``X86_V3`` from the ``cpu-dispatch`` features. - # Only dispatches AVX2 and FMA3 - python -m build --wheel -Csetup-args=-Dcpu-dispatch=avx2,fma3 - # Dispatches AVX and SSE features - python -m build --wheel -Csetup-args=-Dcpu-dispatch=ssse3,sse41,sse42,avx,avx2,fma3 +Compile-time Detection +~~~~~~~~~~~~~~~~~~~~~~ -- ``cpu-dispatch`` escapes any specified baseline features and also escapes - any features not supported by the target platform or compiler without raising - fatal errors. +Specifying features to ``cpu-dispatch`` or ``cpu-baseline`` doesn't explicitly enable them. +Features are detected at compile time, and the maximum available features based on your +specified options will be enabled according to toolchain and platform support. + +This detection occurs by testing feature availability in the compiler through compile-time +source files containing common intrinsics for the specified features. If both the compiler +and assembler support the feature, it will be enabled. + +For example, if you specify ``cpu-dispatch="AVX512_ICL"`` but your compiler doesn't support it, +the feature will be excluded from the build. However, any implied features will still be +enabled if they're supported. -Eventually, you should always check the final report through the build log -to verify the enabled features. See :ref:`opt-build-report` for more details. .. _opt-platform-differences: @@ -234,7 +406,7 @@ The need to align certain CPU features that are assured to be supported by successive generations of the same architecture, some cases: - On ppc64le ``VSX(ISA 2.06)`` and ``VSX2(ISA 2.07)`` both imply one another since the - first generation that supports little-endian mode is Power-8`(ISA 2.07)` + first generation that supports little-endian mode is ``Power-8(ISA 2.07)`` - On AArch64 ``NEON NEON_FP16 NEON_VFPV4 ASIMD`` implies each other since they are part of the hardware baseline. @@ -251,43 +423,6 @@ For example:: Please take a deep look at :ref:`opt-supported-features`, in order to determine the features that imply one another. -**Compilation compatibility** - -Some compilers don't provide independent support for all CPU features. For instance -**Intel**'s compiler doesn't provide separated flags for ``AVX2`` and ``FMA3``, -it makes sense since all Intel CPUs that comes with ``AVX2`` also support ``FMA3``, -but this approach is incompatible with other **x86** CPUs from **AMD** or **VIA**. 
-
-For example::
-
-    # Specify AVX2 will force enables FMA3 on Intel compilers
-    python -m build --wheel -Csetup-args=-Dcpu-baseline=avx2
-    # which is equivalent to
-    python -m build --wheel -Csetup-args=-Dcpu-baseline="avx2 fma3"
-
-
-The following tables only show the differences imposed by some compilers from the
-general context that been shown in the :ref:`opt-supported-features` tables:
-
-.. note::
-
-   Features names with strikeout represent the unsupported CPU features.
-
-.. raw:: html
-
-
-
-.. role:: enabled
-   :class: enabled-feature
-
-.. role:: disabled
-   :class: disabled-feature
-
-.. include:: generated_tables/compilers-diff.inc

 .. _opt-build-report:

 Build report
@@ -300,7 +435,7 @@ expected CPU features by the compiler.
 So we strongly recommend checking the final report log, to be aware of what kind
 of CPU features are enabled and what are not.

-You can find the final report of CPU optimizations at the end of the build log,
+You can find the final report of CPU optimizations by tracing the Meson build log,
 and here is how it looks on x86_64/gcc:

 .. raw:: html

@@ -310,94 +445,63 @@ and here is how it looks on x86_64/gcc:

 .. literalinclude:: log_example.txt
    :language: bash

-There is a separate report for each of ``build_ext`` and ``build_clib``
-that includes several sections, and each section has several values, representing the following:
-
-**Platform**:
-
-- :enabled:`Architecture`: The architecture name of target CPU. It should be one of
-  ``x86``, ``x64``, ``ppc64``, ``ppc64le``, ``armhf``, ``aarch64``, ``s390x`` or ``unknown``.
-
-- :enabled:`Compiler`: The compiler name. It should be one of
-  gcc, clang, msvc, icc, iccw or unix-like.
-
-**CPU baseline**:
-
-- :enabled:`Requested`: The specific features and options to ``cpu-baseline`` as-is.
-- :enabled:`Enabled`: The final set of enabled CPU features.
-- :enabled:`Flags`: The compiler flags that were used to all NumPy C/C++ sources
-  during the compilation except for temporary sources that have been used for generating
-  the binary objects of dispatched features.
-- :enabled:`Extra checks`: list of internal checks that activate certain functionality
-  or intrinsics related to the enabled features, useful for debugging when it comes
-  to developing SIMD kernels.
-
-**CPU dispatch**:
-
-- :enabled:`Requested`: The specific features and options to ``cpu-dispatch`` as-is.
-- :enabled:`Enabled`: The final set of enabled CPU features.
-- :enabled:`Generated`: At the beginning of the next row of this property,
-  the features for which optimizations have been generated are shown in the
-  form of several sections with similar properties explained as follows:
-
-  - :enabled:`One or multiple dispatched feature`: The implied CPU features.
-  - :enabled:`Flags`: The compiler flags that been used for these features.
-  - :enabled:`Extra checks`: Similar to the baseline but for these dispatched features.
-  - :enabled:`Detect`: Set of CPU features that need be detected in runtime in order to
-    execute the generated optimizations.
-
-  The lines that come after the above property and end with a ':' on a separate line,
-  represent the paths of c/c++ sources that define the generated optimizations.

 .. _runtime-simd-dispatch:

-Runtime dispatch
+Runtime Dispatch
 ----------------
+
 Importing NumPy triggers a scan of the available CPU features from the set
-of dispatchable features. This can be further restricted by setting the
+of dispatchable features. You can restrict this scan by setting the
 environment variable ``NPY_DISABLE_CPU_FEATURES`` to a comma-, tab-, or
-space-separated list of features to disable. This will raise an error if
-parsing fails or if the feature was not enabled. For instance, on ``x86_64``
-this will disable ``AVX2`` and ``FMA3``::
+space-separated list of features to disable.
+
+For instance, on ``x86_64`` this will disable ``X86_V4``::

-    NPY_DISABLE_CPU_FEATURES="AVX2,FMA3"
+    NPY_DISABLE_CPU_FEATURES="X86_V4"

-If the feature is not available, a warning will be emitted.
+This will raise an error if parsing fails or if the feature was not enabled
+through the ``cpu-dispatch`` build option. If the feature is supported by the
+build but not available on the current CPU, a warning will be emitted instead.
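+Because the variable is read when NumPy is first imported, it must be set
+before that import happens: in the shell as above, or, as a sketch, from
+Python itself::
+
+    import os
+
+    # must run before the first ``import numpy`` in this process
+    os.environ["NPY_DISABLE_CPU_FEATURES"] = "X86_V4"
+
+    import numpy as np  # NumPy now loads with the X86_V4 code paths disabled
+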
-Tracking dispatched functions
+Tracking Dispatched Functions
 -----------------------------
-Discovering which CPU targets are enabled for different optimized functions is achievable
-through the Python function ``numpy.lib.introspect.opt_func_info``.
-This function offers the flexibility of applying filters using two optional arguments:
-one for refining function names and the other for specifying data types in the signatures.
+
+You can discover which CPU targets are enabled for different optimized functions using
+the Python function ``numpy.lib.introspect.opt_func_info``.
+
+This function offers two optional arguments for filtering results:
+
+1. ``func_name`` - For refining function names
+2. ``signature`` - For specifying data types in the signatures

 For example::

    >> import json
    >> import numpy
    >> func_info = numpy.lib.introspect.opt_func_info(func_name='add|abs', signature='float64|complex64')
    >> print(json.dumps(func_info, indent=2))
    {
-      "absolute": {
-        "dd": {
-          "current": "SSE41",
-          "available": "SSE41 baseline(SSE SSE2 SSE3)"
-        },
-        "Ff": {
-          "current": "FMA3__AVX2",
-          "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
-        },
-        "Dd": {
-          "current": "FMA3__AVX2",
-          "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
-        }
-      },
-      "add": {
-        "ddd": {
-          "current": "FMA3__AVX2",
-          "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
-        },
-        "FFF": {
-          "current": "FMA3__AVX2",
-          "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
-        }
+      "absolute": {
+        "dd": {
+          "current": "baseline(X86_V2)",
+          "available": "baseline(X86_V2)"
+        },
+        "Ff": {
+          "current": "X86_V3",
+          "available": "X86_V3 baseline(X86_V2)"
+        },
+        "Dd": {
+          "current": "X86_V3",
+          "available": "X86_V3 baseline(X86_V2)"
+        }
+      },
+      "add": {
+        "ddd": {
+          "current": "X86_V3",
+          "available": "X86_V3 baseline(X86_V2)"
+        },
+        "FFF": {
+          "current": "X86_V3",
+          "available": "X86_V3 baseline(X86_V2)"
+        }
       }
    }

diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py
deleted file mode 100644
index 3394f67f23ef..000000000000
--- a/doc/source/reference/simd/gen_features.py
+++ /dev/null
@@ -1,196 +0,0 @@
-"""
-Generate CPU features tables from CCompilerOpt
-"""
-from os import path
-
-from numpy.distutils.ccompiler_opt import CCompilerOpt
-
-
-class FakeCCompilerOpt(CCompilerOpt):
-    # disable caching no need for it
-    conf_nocache = True
-
-    def __init__(self, arch, cc, *args, **kwargs):
-        self.fake_info = (arch, cc, '')
-        CCompilerOpt.__init__(self, None, **kwargs)
-
-    def dist_compile(self, sources, flags, **kwargs):
-        return sources
-
-    def dist_info(self):
-        return self.fake_info
-
-    @staticmethod
-    def dist_log(*args, stderr=False):
-        # avoid printing
-        pass
-
-    def feature_test(self, name, force_flags=None, macros=[]):
-        # To speed up
-        return
True - -class Features: - def __init__(self, arch, cc): - self.copt = FakeCCompilerOpt(arch, cc, cpu_baseline="max") - - def names(self): - return self.copt.cpu_baseline_names() - - def serialize(self, features_names): - result = [] - for f in self.copt.feature_sorted(features_names): - gather = self.copt.feature_supported.get(f, {}).get("group", []) - implies = self.copt.feature_sorted(self.copt.feature_implies(f)) - result.append((f, implies, gather)) - return result - - def table(self, **kwargs): - return self.gen_table(self.serialize(self.names()), **kwargs) - - def table_diff(self, vs, **kwargs): - fnames = set(self.names()) - fnames_vs = set(vs.names()) - common = fnames.intersection(fnames_vs) - extra = fnames.difference(fnames_vs) - notavl = fnames_vs.difference(fnames) - iextra = {} - inotavl = {} - idiff = set() - for f in common: - implies = self.copt.feature_implies(f) - implies_vs = vs.copt.feature_implies(f) - e = implies.difference(implies_vs) - i = implies_vs.difference(implies) - if not i and not e: - continue - if e: - iextra[f] = e - if i: - inotavl[f] = e - idiff.add(f) - - def fbold(f): - if f in extra: - return f':enabled:`{f}`' - if f in notavl: - return f':disabled:`{f}`' - return f - - def fbold_implies(f, i): - if i in iextra.get(f, {}): - return f':enabled:`{i}`' - if f in notavl or i in inotavl.get(f, {}): - return f':disabled:`{i}`' - return i - - diff_all = self.serialize(idiff.union(extra)) - diff_all += vs.serialize(notavl) - content = self.gen_table( - diff_all, fstyle=fbold, fstyle_implies=fbold_implies, **kwargs - ) - return content - - def gen_table(self, serialized_features, fstyle=None, fstyle_implies=None, - **kwargs): - - if fstyle is None: - fstyle = lambda ft: f'``{ft}``' - if fstyle_implies is None: - fstyle_implies = lambda origin, ft: fstyle(ft) - - rows = [] - have_gather = False - for f, implies, gather in serialized_features: - if gather: - have_gather = True - name = fstyle(f) - implies = ' '.join([fstyle_implies(f, i) for i in implies]) - gather = ' '.join([fstyle_implies(f, i) for i in gather]) - rows.append((name, implies, gather)) - if not rows: - return '' - fields = ["Name", "Implies", "Gathers"] - if not have_gather: - del fields[2] - rows = [(name, implies) for name, implies, _ in rows] - return self.gen_rst_table(fields, rows, **kwargs) - - def gen_rst_table(self, field_names, rows, tab_size=4): - assert not rows or len(field_names) == len(rows[0]) - rows.append(field_names) - fld_len = len(field_names) - cls_len = [max(len(c[i]) for c in rows) for i in range(fld_len)] - del rows[-1] - cformat = ' '.join('{:<%d}' % i for i in cls_len) - border = cformat.format(*['=' * i for i in cls_len]) - - rows = [cformat.format(*row) for row in rows] - # header - rows = [border, cformat.format(*field_names), border] + rows - # footer - rows += [border] - # add left margin - rows = [(' ' * tab_size) + r for r in rows] - return '\n'.join(rows) - -def wrapper_section(title, content, tab_size=4): - tab = ' ' * tab_size - if content: - return ( - f"{title}\n{'~' * len(title)}" - f"\n.. table::\n{tab}:align: left\n\n" - f"{content}\n\n" - ) - return '' - -def wrapper_tab(title, table, tab_size=4): - tab = ' ' * tab_size - if table: - ('\n' + tab).join(( - '.. tab:: ' + title, - tab + '.. 
table::', - tab + 'align: left', - table + '\n\n' - )) - return '' - - -if __name__ == '__main__': - - pretty_names = { - "PPC64": "IBM/POWER big-endian", - "PPC64LE": "IBM/POWER little-endian", - "S390X": "IBM/ZSYSTEM(S390X)", - "ARMHF": "ARMv7/A32", - "AARCH64": "ARMv8/A64", - "ICC": "Intel Compiler", - # "ICCW": "Intel Compiler msvc-like", - "MSVC": "Microsoft Visual C/C++" - } - gen_path = path.join( - path.dirname(path.realpath(__file__)), "generated_tables" - ) - with open(path.join(gen_path, 'cpu_features.inc'), 'w') as fd: - fd.write(f'.. generated via {__file__}\n\n') - for arch in ( - ("x86", "PPC64", "PPC64LE", "ARMHF", "AARCH64", "S390X") - ): - title = "On " + pretty_names.get(arch, arch) - table = Features(arch, 'gcc').table() - fd.write(wrapper_section(title, table)) - - with open(path.join(gen_path, 'compilers-diff.inc'), 'w') as fd: - fd.write(f'.. generated via {__file__}\n\n') - for arch, cc_names in ( - ("x86", ("clang", "ICC", "MSVC")), - ("PPC64", ("clang",)), - ("PPC64LE", ("clang",)), - ("ARMHF", ("clang",)), - ("AARCH64", ("clang",)), - ("S390X", ("clang",)) - ): - arch_pname = pretty_names.get(arch, arch) - for cc in cc_names: - title = f"On {arch_pname}::{pretty_names.get(cc, cc)}" - table = Features(arch, cc).table_diff(Features(arch, "gcc")) - fd.write(wrapper_section(title, table)) diff --git a/doc/source/reference/simd/generated_tables/compilers-diff.inc b/doc/source/reference/simd/generated_tables/compilers-diff.inc deleted file mode 100644 index d5a87da3c617..000000000000 --- a/doc/source/reference/simd/generated_tables/compilers-diff.inc +++ /dev/null @@ -1,35 +0,0 @@ -.. generated via /numpy/numpy/./doc/source/reference/simd/gen_features.py - -On x86::Intel Compiler -~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ====================== ================================================================================================================================================================================================================================================================================================================================== ====================== - Name Implies Gathers - ====================== ================================================================================================================================================================================================================================================================================================================================== ====================== - FMA3 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`AVX2` - AVX2 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`FMA3` - AVX512F SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 :enabled:`AVX512CD` - :disabled:`XOP` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` - :disabled:`FMA4` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` - :disabled:`AVX512_SPR` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_SKX` :disabled:`AVX512_CLX` :disabled:`AVX512_CNL` :disabled:`AVX512_ICL` :disabled:`AVX512FP16` - ====================== 
================================================================================================================================================================================================================================================================================================================================== ====================== - -On x86::Microsoft Visual C/C++ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - Name Implies Gathers - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - FMA3 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`AVX2` - AVX2 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`FMA3` - AVX512F SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 :enabled:`AVX512CD` :enabled:`AVX512_SKX` - AVX512CD SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F :enabled:`AVX512_SKX` - :disabled:`AVX512_KNL` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512ER` :disabled:`AVX512PF` - :disabled:`AVX512_KNM` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_KNL` :disabled:`AVX5124FMAPS` :disabled:`AVX5124VNNIW` :disabled:`AVX512VPOPCNTDQ` - :disabled:`AVX512_SPR` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_SKX` :disabled:`AVX512_CLX` :disabled:`AVX512_CNL` :disabled:`AVX512_ICL` :disabled:`AVX512FP16` - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - diff --git a/doc/source/reference/simd/generated_tables/cpu_features.inc b/doc/source/reference/simd/generated_tables/cpu_features.inc deleted file mode 100644 index 603370e21545..000000000000 --- a/doc/source/reference/simd/generated_tables/cpu_features.inc +++ /dev/null @@ -1,109 +0,0 @@ -.. generated via /numpy/numpy/./doc/source/reference/simd/gen_features.py - -On x86 -~~~~~~ -.. 
table:: - :align: left - - ============== ========================================================================================================================================================================================== ===================================================== - Name Implies Gathers - ============== ========================================================================================================================================================================================== ===================================================== - ``SSE`` ``SSE2`` - ``SSE2`` ``SSE`` - ``SSE3`` ``SSE`` ``SSE2`` - ``SSSE3`` ``SSE`` ``SSE2`` ``SSE3`` - ``SSE41`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` - ``POPCNT`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` - ``SSE42`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` - ``AVX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` - ``XOP`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``FMA4`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``F16C`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``FMA3`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` - ``AVX2`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` - ``AVX512F`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` - ``AVX512CD`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` - ``AVX512_KNL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512ER`` ``AVX512PF`` - ``AVX512_KNM`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_KNL`` ``AVX5124FMAPS`` ``AVX5124VNNIW`` ``AVX512VPOPCNTDQ`` - ``AVX512_SKX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` - ``AVX512_CLX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512VNNI`` - ``AVX512_CNL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512IFMA`` ``AVX512VBMI`` - ``AVX512_ICL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512_CLX`` ``AVX512_CNL`` ``AVX512VBMI2`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` - ``AVX512_SPR`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512_CLX`` ``AVX512_CNL`` ``AVX512_ICL`` ``AVX512FP16`` - ============== ========================================================================================================================================================================================== ===================================================== - -On IBM/POWER big-endian -~~~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ======== ========================= - Name Implies - ======== ========================= - ``VSX`` - ``VSX2`` ``VSX`` - ``VSX3`` ``VSX`` ``VSX2`` - ``VSX4`` ``VSX`` ``VSX2`` ``VSX3`` - ======== ========================= - -On IBM/POWER little-endian -~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ======== ========================= - Name Implies - ======== ========================= - ``VSX`` ``VSX2`` - ``VSX2`` ``VSX`` - ``VSX3`` ``VSX`` ``VSX2`` - ``VSX4`` ``VSX`` ``VSX2`` ``VSX3`` - ======== ========================= - -On ARMv7/A32 -~~~~~~~~~~~~ -.. table:: - :align: left - - ============== =========================================================== - Name Implies - ============== =========================================================== - ``NEON`` - ``NEON_FP16`` ``NEON`` - ``NEON_VFPV4`` ``NEON`` ``NEON_FP16`` - ``ASIMD`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMDHP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDDP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDFHM`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` - ============== =========================================================== - -On ARMv8/A64 -~~~~~~~~~~~~ -.. table:: - :align: left - - ============== =========================================================== - Name Implies - ============== =========================================================== - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``NEON_FP16`` ``NEON`` ``NEON_VFPV4`` ``ASIMD`` - ``NEON_VFPV4`` ``NEON`` ``NEON_FP16`` ``ASIMD`` - ``ASIMD`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMDHP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDDP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDFHM`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` - ============== =========================================================== - -On IBM/ZSYSTEM(S390X) -~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ======== ============== - Name Implies - ======== ============== - ``VX`` - ``VXE`` ``VX`` - ``VXE2`` ``VX`` ``VXE`` - ======== ============== - diff --git a/doc/source/reference/simd/log_example.txt b/doc/source/reference/simd/log_example.txt index 79c5c6c253ca..c71306d42aae 100644 --- a/doc/source/reference/simd/log_example.txt +++ b/doc/source/reference/simd/log_example.txt @@ -1,79 +1,64 @@ -########### EXT COMPILER OPTIMIZATION ########### -Platform : - Architecture: x64 - Compiler : gcc +Test features "X86_V2" : Supported +Test features "X86_V3" : Supported +Test features "X86_V4" : Supported +Test features "AVX512_ICL" : Supported +Test features "AVX512_SPR" : Supported +Configuring npy_cpu_dispatch_config.h using configuration +Message: +CPU Optimization Options + baseline: + Requested : min + Enabled : X86_V2 + dispatch: + Requested : max + Enabled : X86_V3 X86_V4 AVX512_ICL AVX512_SPR -CPU baseline : - Requested : 'min' - Enabled : SSE SSE2 SSE3 - Flags : -msse -msse2 -msse3 - Extra checks: none - -CPU dispatch : - Requested : 'max -xop -fma4' - Enabled : SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL - Generated : - : - SSE41 : SSE SSE2 SSE3 SSSE3 - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 - Extra checks: none - Detect : SSE SSE2 SSE3 SSSE3 SSE41 - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : numpy/_core/src/umath/_umath_tests.dispatch.c - : - SSE42 : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 - Extra checks: none - Detect : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : - AVX2 : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mavx2 - Extra checks: none - Detect : AVX F16C AVX2 - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithm_fp.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : numpy/_core/src/umath/_umath_tests.dispatch.c - : - (FMA3 AVX2) : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 - Extra checks: none - Detect : AVX F16C FMA3 AVX2 - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_trigonometric.dispatch.c - : - AVX512F : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 -mavx512f - Extra checks: AVX512F_REDUCE - Detect : AVX512F - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithm_fp.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_trigonometric.dispatch.c - : - AVX512_SKX : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 -mavx512f -mavx512cd -mavx512vl -mavx512bw -mavx512dq - Extra checks: AVX512BW_MASK AVX512DQ_MASK - Detect : AVX512_SKX - : 
build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c -CCompilerOpt.cache_flush[804] : write cache to path -> /home/seiko/work/repos/numpy/build/temp.linux-x86_64-3.9/ccompiler_opt_cache_ext.py - -########### CLIB COMPILER OPTIMIZATION ########### -Platform : - Architecture: x64 - Compiler : gcc - -CPU baseline : - Requested : 'min' - Enabled : SSE SSE2 SSE3 - Flags : -msse -msse2 -msse3 - Extra checks: none - -CPU dispatch : - Requested : 'max -xop -fma4' - Enabled : SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL - Generated : none +Generating multi-targets for "_umath_tests.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "argfunc.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "x86_simd_argsort.dispatch.h" + Enabled targets: X86_V4, X86_V3 +Generating multi-targets for "x86_simd_qsort.dispatch.h" + Enabled targets: X86_V4, X86_V3 +Generating multi-targets for "x86_simd_qsort_16bit.dispatch.h" + Enabled targets: AVX512_SPR, AVX512_ICL +Generating multi-targets for "highway_qsort.dispatch.h" + Enabled targets: +Generating multi-targets for "highway_qsort_16bit.dispatch.h" + Enabled targets: +Generating multi-targets for "loops_arithm_fp.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_arithmetic.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_comparison.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_exponent_log.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_hyperbolic.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_logical.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_minmax.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_modulo.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_trigonometric.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_umath_fp.dispatch.h" + Enabled targets: X86_V4, baseline +Generating multi-targets for "loops_unary.dispatch.h" + Enabled targets: X86_V4, baseline +Generating multi-targets for "loops_unary_fp.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_unary_fp_le.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_unary_complex.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_autovec.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_half.dispatch.h" + Enabled targets: AVX512_SPR, X86_V4, baseline +WARNING: Project targets '>=1.5.2' but uses feature deprecated since '1.3.0': Source file src/umath/svml/linux/avx512/svml_z0_acos_d_la.s in the 'objects' kwarg is not an object.. 
+Generating multi-targets for "_simd.dispatch.h" + Enabled targets: X86_V3, X86_V4, baseline diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst index 84590bfac39c..b07419259690 100644 --- a/doc/source/reference/thread_safety.rst +++ b/doc/source/reference/thread_safety.rst @@ -5,7 +5,7 @@ Thread Safety ************* NumPy supports use in a multithreaded context via the `threading` module in the -standard library. Many NumPy operations release the GIL, so unlike many +standard library. Many NumPy operations release the :term:`python:GIL`, so unlike many situations in Python, it is possible to improve parallel performance by exploiting multithreaded parallelism in Python. @@ -22,15 +22,27 @@ are not reproducible, let alone correct. It is also possible to crash the Python interpreter by, for example, resizing an array while another thread is reading from it to compute a ufunc operation. -In the future, we may add locking to ndarray to make writing multithreaded +In the future, we may add locking to :class:`~numpy.ndarray` to make writing multithreaded algorithms using NumPy arrays safer, but for now we suggest focusing on read-only access of arrays that are shared between threads, or adding your own locking if you need both mutation and multithreading. Note that operations that *do not* release the GIL will see no performance gains from use of the `threading` module, and instead might be better served with -`multiprocessing`. In particular, operations on arrays with ``dtype=object`` do -not release the GIL. +`multiprocessing`. In particular, operations on arrays with ``dtype=np.object_`` +do not release the GIL. + +Context-local state +------------------- + +NumPy maintains some state for ufuncs on a context-local basis, which means each +thread in a multithreaded program or task in an asyncio program has its own +independent configuration of `numpy.errstate` (see +:doc:`/reference/routines.err`) and of :ref:`text_formatting_options`. + +You can update state stored in a context variable by entering a context manager. +As soon as the context manager exits, the state will be reset to its value +before entering the context manager. Free-threaded Python -------------------- @@ -40,12 +52,27 @@ Free-threaded Python Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support for python runtimes with the GIL disabled. See https://py-free-threading.github.io for more information about installing and -using free-threaded Python, as well as information about supporting it in -libraries that depend on NumPy. - -Because free-threaded Python does not have a global interpreter lock to -serialize access to Python objects, there are more opportunities for threads to -mutate shared state and create thread safety issues. In addition to the -limitations about locking of the ndarray object noted above, this also means -that arrays with ``dtype=object`` are not protected by the GIL, creating data -races for python objects that are not possible outside free-threaded python. +using :py:term:`free-threaded ` Python, as well as +information about supporting it in libraries that depend on NumPy. + +Because free-threaded Python does not have a +global interpreter lock to serialize access to Python objects, there are more +opportunities for threads to mutate shared state and create thread safety +issues.
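A minimal sketch of the kind of unsynchronized read-modify-write that can go wrong (a hypothetical toy example, not taken from the NumPy documentation):

.. code-block:: python

    import threading
    import numpy as np

    shared = np.zeros(1, dtype=np.int64)

    def bump(n):
        for _ in range(n):
            # Unlocked read-modify-write on a shared array: a thread switch
            # between the read and the write can lose updates.
            shared[0] += 1

    threads = [threading.Thread(target=bump, args=(100_000,)) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(int(shared[0]))  # may print less than 400000

Wrapping the increment in a ``threading.Lock`` makes the result deterministic.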
In addition to the limitations about locking of the +:class:`~numpy.ndarray` object noted above, this also means that arrays with +``dtype=np.object_`` are not protected by the GIL, creating data races for Python +objects that are not possible outside free-threaded Python. + +C-API Threading Support +----------------------- + +For developers writing C extensions that interact with NumPy, several parts of +the :doc:`C-API array documentation ` provide detailed +information about multithreading considerations. + +See Also +-------- + +* :doc:`/reference/random/multithreading` - Practical example of using NumPy's + random number generators in a multithreaded context with + :mod:`concurrent.futures`. diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 03d86cd057d2..cac15b66cf14 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -19,6 +19,10 @@ that takes a fixed number of specific inputs and produces a fixed number of specific outputs. For detailed information on universal functions, see :ref:`ufuncs-basics`. + +There are also :ref:`generalized ufuncs ` which +are functions over vectors (or arrays) instead of only single-element scalars. + :class:`ufunc` ============== @@ -52,7 +56,7 @@ tuple holding a single array) is also valid. If 'out' is None (the default), an uninitialized output array is created, which will be filled in by the ufunc. At the end, this array is returned unless it is zero-dimensional, in which case it is converted to a scalar; -this conversion can be avoided by passing in ``out=...``.  This can also be +this conversion can be avoided by passing in ``out=...``. This can also be spelled `out=Ellipsis` if you think that is clearer. Note that the output is filled only in the places that the broadcast @@ -183,14 +187,17 @@ possess. None of the attributes can be set. pair: ufunc; attributes -============ ================================================================= -**__doc__** A docstring for each ufunc. The first part of the docstring is - dynamically generated from the number of outputs, the name, and - the number of inputs. The second part of the docstring is - provided at creation time and stored with the ufunc. +================= ================================================================= +**__doc__** A docstring for each ufunc. The first part of the docstring is + dynamically generated from the number of outputs, the name, and + the number of inputs. The second part of the docstring is + provided at creation time and stored with the ufunc. + +**__name__** The name of the ufunc. -**__name__** The name of the ufunc. -============ ================================================================= +**__signature__** The call signature of the ufunc, as an :class:`inspect.Signature` + object. +================= ================================================================= .. autosummary:: :toctree: generated/ diff --git a/doc/source/release.rst b/doc/source/release.rst index 36d5e6731f4f..5842fa9fc61a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,13 @@ Release notes .. toctree:: :maxdepth: 2 + 2.5.0 + 2.4.0 + 2.3.5 + 2.3.4 + 2.3.3 + 2.3.2 + 2.3.1 2.3.0 2.2.6 2.2.5 diff --git a/doc/source/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index 36cd1d65a266..4700e37203ce 100644 --- a/doc/source/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst @@ -27,11 +27,11 @@ Details of these improvements can be found below.
Build System Changes ==================== -* Numpy now uses ``setuptools`` for its builds instead of plain distutils. +* NumPy now uses ``setuptools`` for its builds instead of plain distutils. This fixes usage of ``install_requires='numpy'`` in the ``setup.py`` files of - projects that depend on Numpy (see gh-6551). It potentially affects the way - that build/install methods for Numpy itself behave though. Please report any - unexpected behavior on the Numpy issue tracker. + projects that depend on NumPy (see gh-6551). It potentially affects the way + that build/install methods for NumPy itself behave though. Please report any + unexpected behavior on the NumPy issue tracker. * Bento build support and related files have been removed. * Single file build support and related files have been removed. @@ -39,7 +39,7 @@ Build System Changes Future Changes ============== -The following changes are scheduled for Numpy 1.12.0. +The following changes are scheduled for NumPy 1.12.0. * Support for Python 2.6, 3.2, and 3.3 will be dropped. * Relaxed stride checking will become the default. See the 1.8.0 release @@ -61,7 +61,7 @@ The following changes are scheduled for Numpy 1.12.0. In a future release the following changes will be made. * The ``rand`` function exposed in ``numpy.testing`` will be removed. That - function is left over from early Numpy and was implemented using the + function is left over from early NumPy and was implemented using the Python random module. The random number generators from ``numpy.random`` should be used instead. * The ``ndarray.view`` method will only allow c_contiguous arrays to be @@ -124,7 +124,7 @@ non-integers for degree specification. *np.dot* now raises ``TypeError`` instead of ``ValueError`` ----------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior mimics that of other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. @@ -194,7 +194,7 @@ New Features * ``f2py.compile`` has a new ``extension`` keyword parameter that allows the fortran extension to be specified for generated temp files. For instance, - the files can be specifies to be ``*.f90``. The ``verbose`` argument is + the files can be specified to be ``*.f90``. The ``verbose`` argument is also activated, it was previously ignored. * A ``dtype`` parameter has been added to ``np.random.randint`` @@ -205,7 +205,7 @@ New Features - ``np.int16``, ``np.uint16``, - ``np.int32``, ``np.uint32``, - ``np.int64``, ``np.uint64``, - - ``np.int_ ``, ``np.intp`` + - ``np.int_``, ``np.intp`` The specification is by precision rather than by C type. Hence, on some platforms ``np.int64`` may be a ``long`` instead of ``long long`` even if @@ -254,7 +254,7 @@ Memory and speed improvements for masked arrays ----------------------------------------------- Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses ``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and -avoid a big memory peak. Another optimization was done to avoid a memory +avoids a big memory peak. Another optimization was done to avoid a memory peak and useless computations when printing a masked array. ``ndarray.tofile`` now uses fallocate on linux @@ -304,13 +304,13 @@ Instead, ``np.broadcast`` can be used in all cases. 
``np.trace`` now respects array subclasses ------------------------------------------ -This behaviour mimics that of other functions such as ``np.diagonal`` and +This behavior mimics that of other functions such as ``np.diagonal`` and ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give the same result. ``np.dot`` now raises ``TypeError`` instead of ``ValueError`` ------------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior is now consistent with other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. diff --git a/doc/source/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst index 3bfaf1ea5169..400c9553fbd3 100644 --- a/doc/source/release/1.13.0-notes.rst +++ b/doc/source/release/1.13.0-notes.rst @@ -136,7 +136,7 @@ implement ``__*slice__`` on the derived class, as ``__*item__`` will intercept these calls correctly. Any code that did implement these will work exactly as before. Code that -invokes``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will +invokes ``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will now issue a DeprecationWarning - ``.__getitem__(slice(start, end))`` should be used instead. diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst index 68040b470caa..055a933291b9 100644 --- a/doc/source/release/1.14.0-notes.rst +++ b/doc/source/release/1.14.0-notes.rst @@ -409,8 +409,8 @@ This new default changes the float output relative to numpy 1.13. The old behavior can be obtained in 1.13 "legacy" printing mode, see compatibility notes above. -``hermitian`` option added to``np.linalg.matrix_rank`` ------------------------------------------------------- +``hermitian`` option added to ``np.linalg.matrix_rank`` +------------------------------------------------------- The new ``hermitian`` option allows choosing between standard SVD based matrix rank calculation and the more efficient eigenvalue based method for symmetric/hermitian matrices. diff --git a/doc/source/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst index e84386f0fa5d..7aa85d167d29 100644 --- a/doc/source/release/1.15.0-notes.rst +++ b/doc/source/release/1.15.0-notes.rst @@ -213,7 +213,7 @@ C API changes New functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` ----------------------------------------------------------------------------------- Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` -have been added and should be used in place of the ``npy_get_floatstatus``and +have been added and should be used in place of the ``npy_get_floatstatus`` and ``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang were rearranging the order of operations when the previous functions were used in the ufunc SIMD functions, resulting in the floatstatus flags being checked @@ -326,8 +326,8 @@ passed explicitly, and are not yet computed automatically. No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins chosen is related to the data size in this situation. 
-The edges returned by `histogram`` and ``histogramdd`` now match the data float type ------------------------------------------------------------------------------------- +The edges returned by ``histogram`` and ``histogramdd`` now match the data float type +------------------------------------------------------------------------------------- When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the returned edges are now of the same dtype. Previously, ``histogram`` would only return the same type if explicit bins were given, and ``histogram`` would diff --git a/doc/source/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst index 07e06ca6e043..7a387629fe46 100644 --- a/doc/source/release/1.16.0-notes.rst +++ b/doc/source/release/1.16.0-notes.rst @@ -271,7 +271,7 @@ via the services of shippable.com. Appending to build flags ------------------------ -`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and +``numpy.distutils`` has always overridden rather than appended to `LDFLAGS` and other similar such environment variables for compiling Fortran extensions. Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the behavior will be appending. This applied to: `LDFLAGS`, `F77FLAGS`, diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index 15e0ad77f5d1..43d2cdedf4b6 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -202,9 +202,9 @@ exception will require adaptation, and code that mistakenly called Moved modules in ``numpy.random`` --------------------------------- As part of the API cleanup, the submodules in ``numpy.random`` -``bit_generator``, ``philox``, ``pcg64``, ``sfc64, ``common``, ``generator``, +``bit_generator``, ``philox``, ``pcg64``, ``sfc64``, ``common``, ``generator``, and ``bounded_integers`` were moved to ``_bit_generator``, ``_philox``, -``_pcg64``, ``_sfc64, ``_common``, ``_generator``, and ``_bounded_integers`` +``_pcg64``, ``_sfc64``, ``_common``, ``_generator``, and ``_bounded_integers`` respectively to indicate that they are not part of the public interface. (`gh-14608 `__) @@ -350,7 +350,7 @@ and load will be addressed in a future release. ``numpy.distutils`` append behavior changed for LDFLAGS and similar ------------------------------------------------------------------- -`numpy.distutils` has always overridden rather than appended to ``LDFLAGS`` and +``numpy.distutils`` has always overridden rather than appended to ``LDFLAGS`` and other similar such environment variables for compiling Fortran extensions. Now the default behavior has changed to appending - which is the expected behavior in most situations. To preserve the old (overwriting) behavior, set the diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index a2276ac5016d..298d417bb0c2 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -735,7 +735,7 @@ checking. Negation of user defined BLAS/LAPACK detection order ---------------------------------------------------- -`~numpy.distutils` allows negation of libraries when determining BLAS/LAPACK +``~numpy.distutils`` allows negation of libraries when determining BLAS/LAPACK libraries. This may be used to remove an item from the library resolution phase, i.e. 
to disallow NetLIB libraries one could do: diff --git a/doc/source/release/1.21.5-notes.rst b/doc/source/release/1.21.5-notes.rst index c69d26771268..b3e810b51c06 100644 --- a/doc/source/release/1.21.5-notes.rst +++ b/doc/source/release/1.21.5-notes.rst @@ -33,7 +33,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... -* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/source/release/2.3.0-notes.rst b/doc/source/release/2.3.0-notes.rst index 74f11a0b4537..4c3c923b3b5e 100644 --- a/doc/source/release/2.3.0-notes.rst +++ b/doc/source/release/2.3.0-notes.rst @@ -4,16 +4,529 @@ NumPy 2.3.0 Release Notes ========================== +The NumPy 2.3.0 release continues the work to improve free-threaded Python +support and annotations together with the usual set of bug fixes. It is unusual +in the number of expired deprecations, code modernizations, and style cleanups. +The latter may not be visible to users, but is important for code maintenance +over the long term. Note that we have also upgraded from manylinux2014 to +manylinux_2_28. + +Users running on a Mac with an M4 CPU may see various warnings about +invalid values and such. The warnings are a known problem with Accelerate. +They are annoying, but otherwise harmless. Apple promises to fix them. + +This release supports Python versions 3.11-3.13; Python 3.14 will be supported +when it is released. + Highlights ========== -*We'll choose highlights for this release near the end of the release cycle.* +* Interactive examples in the NumPy documentation. +* Building NumPy with OpenMP Parallelization. +* Preliminary support for Windows on ARM. +* Improved support for free-threaded Python. +* Improved annotations. + + +New functions +============= + +New function ``numpy.strings.slice`` +------------------------------------ +The new function ``numpy.strings.slice`` was added, which implements fast +native slicing of string arrays. It supports the full slicing API including +negative slice offsets and steps. + +(`gh-27789 `__) + + +Deprecations +============ + +* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic + static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` + section of your mypy configuration. If this change results in new errors being + reported, kindly open an issue. + + (`gh-28129 `__) + +* The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in + a future version. + + This type was previously intended to be used as a generic upper bound for + type-parameters, for example: + + .. code-block:: python + + import numpy as np + import numpy.typing as npt + + def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... + + But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete + subtypes, causing static type-checkers to reject ``x: np.float64 = + f(np.complex128(42j))``. + + So instead, the better approach is to use ``typing.overload``: + + ..
code-block:: python + + import numpy as np + from typing import overload + + @overload + def f(x: np.complex64) -> np.float32: ... + @overload + def f(x: np.complex128) -> np.float64: ... + @overload + def f(x: np.clongdouble) -> np.longdouble: ... + + (`gh-28884 `__) + + +Expired deprecations +==================== + +* Remove deprecated macros like ``NPY_OWNDATA`` from Cython interfaces in favor + of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` + in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove the aliases ``generate_divbyzero_error`` (for + ``npy_set_floatstatus_divbyzero``) and ``generate_overflow_error`` (for + ``npy_set_floatstatus_overflow``) (deprecated since 1.10) + + (`gh-28254 `__) + +* Remove ``np.tostring``, use ``tobytes`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) + + (`gh-28254 `__) + +* Raise when using ``np.bincount(...minlength=None)``, use 0 instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Passing ``shape=None`` to functions with a non-optional shape argument + errors, use ``()`` instead (deprecated since 1.20) + + (`gh-28254 `__) + +* Inexact matches for ``mode`` and ``searchside`` raise (deprecated since 1.20) + + (`gh-28254 `__) + +* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) + + (`gh-28254 `__) + +* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they + would guess (deprecated since 1.18) + + (`gh-28254 `__) + +* ``datetime64`` and ``timedelta64`` construction with a tuple no longer + accepts an ``event`` value, either use a two-tuple of (unit, num) or a + 4-tuple of (unit, num, den, 1) (deprecated since 1.14) + + (`gh-28254 `__) + +* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that + attribute must be a dtype-instance rather than a thing that can be parsed as + a dtype instance (deprecated in 1.19). At some point the whole construct of + using a dtype attribute will be deprecated (see #25306) + + (`gh-28254 `__) + +* Passing booleans as partition index errors (deprecated since 1.23) + + (`gh-28254 `__) + +* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) + + (`gh-28254 `__) + +* Disallow making a non-writeable array writeable for arrays with a base that + does not own its data (deprecated since 1.17) + + (`gh-28254 `__) + +* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, + not ``unsafe`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Unpickling a scalar with object dtype errors (deprecated since 1.20) + + (`gh-28254 `__) + +* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated + since 1.19) + + (`gh-28254 `__) + +* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, + ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since + 1.19) + + (`gh-28254 `__) + +* The Python built-in ``round`` errors for complex scalars.
Use ``np.round`` or + ``scalar.round`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* ``np.bool`` scalars can no longer be interpreted as an index (deprecated since 1.19) + + (`gh-28254 `__) + +* Parsing an integer via a float string is no longer supported (deprecated + since 1.23). To avoid this error you can: + * make sure the original data is stored as integers, + * use the ``converters=float`` keyword argument, or + * use ``np.loadtxt(...).astype(np.int64)`` + + (`gh-28254 `__) + +* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` + or fill the tuple with ``None`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Special handling of ``matrix`` in ``np.outer`` has been removed. Convert to an ndarray + via ``matrix.A`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Removed the ``np.compat`` package source code (removed in 2.0) + + (`gh-28961 `__) + + +C API changes +============= + +* ``NpyIter_GetTransferFlags`` is now available to check if + the iterator needs the Python API or if casts may cause floating point + errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` + to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). + + (`gh-27883 `__) + +* ``NpyIter`` now has no limit on the number of operands it supports. + + (`gh-28080 `__) + +New ``NpyIter_GetTransferFlags`` and ``NpyIter_IterationNeedsAPI`` change +------------------------------------------------------------------------- +NumPy now has the new ``NpyIter_GetTransferFlags`` function as a more precise +way of checking iterator/buffering needs, i.e. whether the Python API/GIL is +required or floating point errors may occur. +This function is also faster if you already know your needs without buffering. + +The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were +previously performed at setup time. While it was never necessary to call it +multiple times, doing so will now have a larger cost. + +(`gh-27998 `__) + + +New Features +============ + +* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. + This way, static type-checkers will infer ``dtype: np.dtype`` as + ``dtype: np.dtype[Any]``, without reporting an error. + + (`gh-28669 `__) + +* Static type-checkers now interpret: + + - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. + - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. + + This is because their type parameters now have default values. + + (`gh-28940 `__) + +NumPy now registers its pkg-config paths with the pkgconf_ PyPI package +----------------------------------------------------------------------- +The pkgconf_ PyPI package provides an interface for projects like NumPy to +register their own paths to be added to the pkg-config search path. This means +that when using pkgconf_ from PyPI, NumPy will be discoverable without needing +any custom environment configuration. + +.. attention:: + + This only applies when using the pkgconf_ package from PyPI_, or put another + way, this only applies when installing pkgconf_ via a Python package + manager. + + If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or + any other source that does not use the pkgconf-pypi_ project, the NumPy + pkg-config directory will not be automatically added to the search path. In + these situations, you might want to use ``numpy-config``. + + +.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi +.. _PyPI: https://pypi.org/ +..
_pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi + +(`gh-28214 `__) + +Allow ``out=...`` in ufuncs to ensure array result +-------------------------------------------------- +NumPy has the sometimes difficult behavior of currently usually +returning scalars rather than 0-D arrays (even if the inputs were 0-D arrays). +This is especially problematic for non-numerical dtypes (e.g. ``object``). + +For ufuncs (i.e. most simple math functions) it is now possible to use +``out=...`` (literally \`...\`, e.g. ``out=Ellipsis``) which is identical in +behavior to ``out`` not being passed, but will ensure a non-scalar return. +This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` also ensures +a non-scalar return. + +Other functions with an ``out=`` kwarg should gain support eventually. +Downstream libraries that interoperate via ``__array_ufunc__`` or +``__array_function__`` may need to adapt to support this. + +(`gh-28576 `__) + +Building NumPy with OpenMP Parallelization +------------------------------------------ +NumPy now supports OpenMP parallel processing capabilities when built with the +``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. +When enabled, the ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for +parallel thread execution, improving performance for these operations. + +(`gh-28619 `__) + +Interactive examples in the NumPy documentation +----------------------------------------------- +The NumPy documentation includes a number of examples that +can now be run interactively in your browser using WebAssembly +and Pyodide. + +Please note that the examples are currently experimental in +nature and may not work as expected for all methods in the +public API. + +(`gh-26745 `__) + + +Improvements +============ + +* Scalar comparisons between non-comparable dtypes such as + ``np.array(1) == np.array('s')`` now return a NumPy bool instead of + a Python bool. + + (`gh-27288 `__) + +* ``np.nditer`` now has no limit on the number of supported operands + (C-integer). + + (`gh-28080 `__) + +* No-copy pickling is now supported for any + array that can be transposed to a C-contiguous array. + + (`gh-28105 `__) + +* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the + custom dtype over a more generic name constructed from its ``kind`` and + ``itemsize``. + + (`gh-28250 `__) + +* ``np.dot`` now reports floating point exceptions. + + (`gh-28442 `__) + +* ``np.dtypes.StringDType`` is now a + `generic type `_ which + accepts a type argument for ``na_object`` that defaults to ``typing.Never``. + For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, + and ``StringDType()`` returns a ``StringDType[typing.Never]``. + + (`gh-28856 `__) + +Added warnings to ``np.isclose`` +-------------------------------- +``np.isclose`` now emits warning messages if at least one of ``atol`` or +``rtol`` is either ``np.nan`` or ``np.inf``. + +* Warnings follow the user's ``np.seterr`` settings + +(`gh-28205 `__) + + +Performance improvements and changes +==================================== + +Performance improvements to ``np.unique`` +----------------------------------------- +``np.unique`` now tries to use a hash table to find unique values instead of +sorting the values first. This is limited to certain dtypes +for now, and the function is now faster for those dtypes.
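For illustration, a minimal sketch (exactly which dtypes take the hash-based path is an internal detail and may change between releases):

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(0)
    values = rng.integers(0, 100, size=1_000_000)  # integer dtype
    uniq = np.unique(values)  # may take the hash-based fast path
    print(len(uniq))  # almost surely 100 with this many draws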
The function now also +exposes a ``sorted`` parameter to allow returning unique values as they were +found, instead of sorting them afterwards. + +(`gh-26018 `__) + +Performance improvements to ``np.sort`` and ``np.argsort`` +---------------------------------------------------------- +The ``np.sort`` and ``np.argsort`` functions can now leverage OpenMP for parallel +thread execution, resulting in up to 3.5x speedups on x86 architectures with +AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built +with the -Denable_openmp Meson flag. Users can control the number of threads +used by setting the OMP_NUM_THREADS environment variable. + +(`gh-28619 `__) + +Performance improvements for ``np.float16`` casts +------------------------------------------------- +Earlier, floating point casts to and from ``np.float16`` types +were emulated in software on all platforms. + +Now, on ARM devices that support Neon float16 intrinsics (such as +recent Apple Silicon), the native float16 path is used to achieve +the best performance. + +(`gh-28769 `__) + +Performance improvements for ``np.matmul`` +------------------------------------------ +BLAS is now used for ``matmul`` even when operands are non-contiguous, by copying +them if needed. + +(`gh-23752 `__) + +Changes +======= + +* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` + now always return zero for empty arrays. Empty arrays have at least one axis + of size zero. This affects ``np.linalg.norm``, ``np.linalg.vector_norm``, and + ``np.linalg.matrix_norm``. Previously, NumPy would raise an error or return + zero depending on the shape of the array. + + (`gh-28343 `__) + +* A spelling error in the error message returned when converting a string to a + float with the method ``np.format_float_positional`` has been fixed. + + (`gh-28569 `__) + +* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. + +* ``numpy.count_nonzero`` for ``axis=None`` (default) now returns a NumPy scalar + instead of a Python integer. + +* The ``axis`` parameter of the ``numpy.take_along_axis`` function now has a default + value of ``-1``. + + (`gh-28615 `__) + +* Printing of ``np.float16`` and ``np.float32`` scalars and arrays has been improved by + adjusting the transition to scientific notation based on the floating point precision. + A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. + + (`gh-28703 `__) + +* Multiplication between a string and an integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. + + (`gh-29060 `__) + +``unique_values`` may return unsorted data +------------------------------------------ +The relatively new function (added in NumPy 2.0) ``unique_values`` may now +return unsorted results. Just as with ``unique_counts`` and ``unique_all``, a +sorted result was never guaranteed; however, the result happened to be sorted +until now. In cases where these do return a sorted result, this may change in +future releases to improve performance. + +(`gh-26018 `__) + +Changes to the main iterator and potential numerical changes +------------------------------------------------------------ +The main iterator, used in math functions and via ``np.nditer`` from Python and +``NpyIter`` in C, now behaves differently for some buffered iterations.
This +means that: + +* The buffer size used will often be smaller than the maximum buffer size + allowed by the ``buffersize`` parameter. + +* The "growinner" flag is now honored with buffered reductions when no operand + requires buffering. + +For ``np.sum()`` such changes in buffersize may slightly change numerical +results of floating point operations. Users who use "growinner" for custom +reductions could notice changes in precision (for example, in NumPy we removed +it from ``einsum`` to avoid most precision changes and improve precision for +some 64bit floating point inputs). + +(`gh-27883 `__) + +The minimum supported GCC version is now 9.3.0 +---------------------------------------------- +The minimum supported version was updated from 8.4.0 to 9.3.0, primarily in +order to reduce the chance of platform-specific bugs in old GCC versions +causing issues. + +(`gh-28102 `__) + +Changes to automatic bin selection in numpy.histogram +----------------------------------------------------- +The automatic bin selection algorithm in ``numpy.histogram`` has been modified +to avoid out-of-memory errors for samples with low variation. For full control +over the selected bins the user can set the ``bins`` or ``range`` parameters +of ``numpy.histogram``. + +(`gh-28426 `__) + +Build manylinux_2_28 wheels +--------------------------- +Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the +``manylinux2014`` tag), which means dropping support for redhat7/centos7, +amazonlinux2, debian9, ubuntu18.04, and other pre-glibc2.28 operating system +versions, as per the `PEP 600 support table`_. + +.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check + +(`gh-28436 `__) +Remove use of -Wl,-ld_classic on macOS +-------------------------------------- +Remove use of -Wl,-ld_classic on macOS. This hack is no longer needed by Spack, +and resulted in libraries that cannot link to other libraries built with the +new ld. -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +(`gh-28713 `__) -.. **Content from release note snippets in doc/release/upcoming_changes:** +Re-enable overriding functions in ``numpy.strings`` ------------------------------------------------------- +Re-enable overriding functions in the ``numpy.strings`` module. -.. include:: notes-towncrier.rst +(`gh-28741 `__) diff --git a/doc/source/release/2.3.1-notes.rst b/doc/source/release/2.3.1-notes.rst new file mode 100644 index 000000000000..d8193f07671c --- /dev/null +++ b/doc/source/release/2.3.1-notes.rst @@ -0,0 +1,53 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.1 Release Notes +========================= + +The NumPy 2.3.1 release is a patch release with several bug fixes, annotation +improvements, and better support for OpenBSD. Highlights are: + +- Fix a bug in ``matmul`` with a non-contiguous ``out`` kwarg parameter +- Fix for Accelerate runtime warnings on M4 hardware +- Fix ``np.vectorize`` casting errors introduced in NumPy 2.3.0 +- Improved CPU feature support for FreeBSD and OpenBSD + +This release supports Python versions 3.11-3.13; Python 3.14 will be supported +when it is released. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time.
+ +* Brad Smith + +* Charles Harris +* Developer-Ecosystem-Engineering +* François Rozet +* Joren Hammudoglu +* Matti Picus +* Mugundan Selvanayagam +* Nathan Goldbaum +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#29140 `__: MAINT: Prepare 2.3.x for further development +* `#29191 `__: BUG: fix matmul with transposed out arg (#29179) +* `#29192 `__: TYP: Backport typing fixes and improvements. +* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196) +* `#29222 `__: TYP: Backport typing fixes +* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation... +* `#29234 `__: TST: Fix test that uses uninitialized memory (#29232) +* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223) +* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231) +* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. + diff --git a/doc/source/release/2.3.2-notes.rst b/doc/source/release/2.3.2-notes.rst new file mode 100644 index 000000000000..2acc400c89fe --- /dev/null +++ b/doc/source/release/2.3.2-notes.rst @@ -0,0 +1,56 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.2 Release Notes +========================= + +The NumPy 2.3.2 release is a patch release with a number of bug fixes and +maintenance updates. The highlights are: + +- Wheels for Python 3.14.0rc1 +- PyPy updated to the latest stable release +- OpenBLAS updated to 0.3.30 + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Joren Hammudoglu +* Maanas Arora +* Marco Edward Gorelli +* Matti Picus +* Nathan Goldbaum +* Sebastian Berg +* kostayScr + + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29256 `__: MAINT: Prepare 2.3.x for further development +* `#29283 `__: TYP: Work around a mypy issue with bool arrays (#29248) +* `#29284 `__: BUG: fix fencepost error in StringDType internals +* `#29287 `__: BUG: handle case in mapiter where descriptors might get replaced... +* `#29350 `__: BUG: Fix shape error path in array-interface +* `#29412 `__: BUG: Allow reading non-npy files in npz and add test +* `#29413 `__: TST: Avoid uninitialized values in test (#29341) +* `#29414 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29415 `__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) +* `#29416 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... +* `#29417 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392) +* `#29424 `__: MAINT: use a stable pypy release in CI +* `#29425 `__: MAINT: Support python 314rc1 +* `#29429 `__: MAINT: Update highway to match main. +* `#29430 `__: BLD: use github to build macos-arm64 wheels with OpenBLAS and... +* `#29437 `__: BUG: fix datetime/timedelta hash memory leak (#29411) + + diff --git a/doc/source/release/2.3.3-notes.rst b/doc/source/release/2.3.3-notes.rst new file mode 100644 index 000000000000..3c293c3db322 --- /dev/null +++ b/doc/source/release/2.3.3-notes.rst @@ -0,0 +1,59 @@ +.. 
currentmodule:: numpy + +========================= +NumPy 2.3.3 Release Notes +========================= + +The NumPy 2.3.3 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. Note +that the 3.14.0 final is currently expected in October 2025; this release is +based on 3.14.0rc2. + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aleksandr A. Voyt + +* Bernard Roesler + +* Charles Harris +* Hunter Hogan + +* Joren Hammudoglu +* Maanas Arora +* Matti Picus +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sanjay Kumar Sakamuri Kamalakar + +* Tobias Markus + +* Warren Weckesser +* Zebreus + + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. + +* `#29440 `__: MAINT: Prepare 2.3.x for further development. +* `#29446 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29447 `__: BLD: allow targeting webassembly without emscripten +* `#29460 `__: MAINT: Backport write_release.py +* `#29473 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29500 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) +* `#29501 `__: MAINT: Add .file entry to all .s SVML files +* `#29556 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29562 `__: BLD: update vendored Meson to 1.8.3 [wheel build] +* `#29563 `__: BUG: Fix metadata not roundtripping when pickling datetime (#29555) +* `#29587 `__: TST: update link and version for Intel SDE download +* `#29593 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29672 `__: MAINT: Update pythoncapi-compat from main. +* `#29673 `__: MAINT: Update cibuildwheel. +* `#29674 `__: MAINT: Fix typo in wheels.yml +* `#29683 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29684 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29685 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29687 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29689 `__: BLD: Add missing include +* `#29691 `__: BUG: use correct input dtype in flatiter assignment +* `#29700 `__: TYP: fix np.bool method declarations +* `#29701 `__: BUG: Correct ambiguous logic for s390x CPU feature detection diff --git a/doc/source/release/2.3.4-notes.rst b/doc/source/release/2.3.4-notes.rst new file mode 100644 index 000000000000..6ba7c06b7514 --- /dev/null +++ b/doc/source/release/2.3.4-notes.rst @@ -0,0 +1,83 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.3.4 Release Notes +========================== + +The NumPy 2.3.4 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. This +release is based on the Python 3.14.0 final. + + +Changes +======= + +The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a +``.a`` file extension on win-arm64, for compatibility when building with MSVC and +``setuptools``. Please note that using these static libraries is discouraged; +existing projects that use them should build with a matching compiler toolchain, +which is ``clang-cl`` on Windows on Arm. + +(`gh-29750 `__) + + +Contributors +============ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time.
+
+* !DWesl
+* Charles Harris
+* Christian Barbia +
+* Evgeni Burovski
+* Joren Hammudoglu
+* Maaz +
+* Mateusz Sokół
+* Matti Picus
+* Nathan Goldbaum
+* Ralf Gommers
+* Riku Sakamoto +
+* Sandeep Gupta +
+* Sayed Awad
+* Sebastian Berg
+* Sergey Fedorov +
+* Warren Weckesser
+* dependabot[bot]
+
+Pull requests merged
+====================
+
+A total of 30 pull requests were merged for this release.
+
+* `#29725 `__: MAINT: Prepare 2.3.x for further development
+* `#29781 `__: MAINT: Pin some upstream dependences
+* `#29782 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f
+* `#29783 `__: BUG: Include python-including headers first (#29281)
+* `#29784 `__: TYP: fix np.number and np.\*integer method declaration
+* `#29785 `__: TYP: mypy 1.18.1
+* `#29788 `__: TYP: replace scalar type __init__ with __new__
+* `#29790 `__: BUG: Fix ``dtype`` refcount in ``__array__`` (#29715)
+* `#29791 `__: TYP: fix method declarations in floating, timedelta64, and datetime64Backport
+* `#29792 `__: MAINT: delete unused variables in unary logical dispatch
+* `#29797 `__: BUG: Fix pocketfft umath strides for AIX compatibility (#29768)
+* `#29798 `__: BUG: np.setbufsize should raise ValueError for negative input
+* `#29799 `__: BUG: Fix assert in nditer buffer setup
+* `#29800 `__: BUG: Stable ScalarType ordering
+* `#29838 `__: TST: Pin pyparsing to avoid matplotlib errors.
+* `#29839 `__: BUG: linalg: emit a MemoryError on a malloc failure (#29811)
+* `#29840 `__: BLD: change file extension for libnpymath on win-arm64 from .a...
+* `#29864 `__: CI: Fix loongarch64 CI (#29856)
+* `#29865 `__: TYP: Various typing fixes
+* `#29910 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC (#29908)
+* `#29911 `__: TYP: add missing ``__slots__`` (#29901)
+* `#29913 `__: TYP: wrong argument defaults in ``testing._private`` (#29902)
+* `#29920 `__: BUG: avoid segmentation fault in string_expandtabs_length_promoter
+* `#29921 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types...
+* `#29922 `__: TYP: minor fixes related to ``errstate`` (#29914)
+* `#29923 `__: TST: use requirements/test_requirements across CI (#29919)
+* `#29926 `__: BUG: fix negative samples generated by Wald distribution (#29609)
+* `#29940 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.1
+* `#29949 `__: STY: rename @classmethod arg to cls
+* `#29950 `__: MAINT: Simplify string arena growth strategy (#29885)
+
diff --git a/doc/source/release/2.3.5-notes.rst b/doc/source/release/2.3.5-notes.rst
new file mode 100644
index 000000000000..8013ef468055
--- /dev/null
+++ b/doc/source/release/2.3.5-notes.rst
@@ -0,0 +1,50 @@
+.. currentmodule:: numpy
+
+=========================
+NumPy 2.3.5 Release Notes
+=========================
+
+The NumPy 2.3.5 release is a patch release split between a number of maintenance
+updates and bug fixes. This release supports Python versions 3.11-3.14.
+
+
+Contributors
+============
+
+A total of 10 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Aaron Kollasch +
+* Charles Harris
+* Joren Hammudoglu
+* Matti Picus
+* Nathan Goldbaum
+* Rafael Laboissière +
+* Sayed Awad
+* Sebastian Berg
+* Warren Weckesser
+* Yasir Ashfaq +
+
+
+Pull requests merged
+====================
+
+A total of 16 pull requests were merged for this release.
+
+* `#29979 `__: MAINT: Prepare 2.3.x for further development
+* `#30026 `__: SIMD, BLD: Backport FPMATH mode on x86-32 and filter successor...
+* `#30029 `__: MAINT: Backport write_release.py
+* `#30041 `__: TYP: Various typing updates
+* `#30059 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len...
+* `#30063 `__: BUG: Fix np.strings.slice if start > stop
+* `#30076 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation (#30071)
+* `#30090 `__: BUG: Fix resize when it contains references (#29970)
+* `#30129 `__: BLD: update scipy-openblas, use -Dpkg_config_path (#30049)
+* `#30130 `__: BUG: Avoid compilation error of wrapper file generated with SWIG...
+* `#30157 `__: BLD: use scipy-openblas 0.3.30.7 (#30132)
+* `#30158 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray``...
+* `#30185 `__: BUG: Fix check of PyMem_Calloc return value. (#30176)
+* `#30217 `__: DOC: fix links for newly rebuilt numpy-tutorials site
+* `#30218 `__: BUG: Fix build on s390x with clang (#30214)
+* `#30237 `__: ENH: Make FPE blas check a runtime check for all apple arm systems
+
diff --git a/doc/source/release/2.4.0-notes.rst b/doc/source/release/2.4.0-notes.rst
new file mode 100644
index 000000000000..29a7e5ce6073
--- /dev/null
+++ b/doc/source/release/2.4.0-notes.rst
@@ -0,0 +1,19 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 2.4.0 Release Notes
+==========================
+
+
+Highlights
+==========
+
+*We'll choose highlights for this release near the end of the release cycle.*
+
+
+.. if release snippets have been incorporated already, uncomment the following
+   line (leave the ``.. include::`` directive)
+
+.. **Content from release note snippets in doc/release/upcoming_changes:**
+
+.. include:: notes-towncrier.rst
diff --git a/doc/source/release/2.5.0-notes.rst b/doc/source/release/2.5.0-notes.rst
new file mode 100644
index 000000000000..1c07e859a7b9
--- /dev/null
+++ b/doc/source/release/2.5.0-notes.rst
@@ -0,0 +1,19 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 2.5.0 Release Notes
+==========================
+
+
+Highlights
+==========
+
+*We'll choose highlights for this release near the end of the release cycle.*
+
+
+.. if release snippets have been incorporated already, uncomment the following
+   line (leave the ``.. include::`` directive)
+
+.. **Content from release note snippets in doc/release/upcoming_changes:**
+
+.. include:: notes-towncrier.rst
diff --git a/doc/source/try_examples.json b/doc/source/try_examples.json
index 510efcdd2694..823d4a5d1e82
--- a/doc/source/try_examples.json
+++ b/doc/source/try_examples.json
@@ -1,8 +1,8 @@
 {
   "global_min_height": "400px",
   "ignore_patterns": [
-    "distutils.html*",
     "reference\/typing.html*",
     "numpy.__array_namespace_info__.html*"
   ]
 }
+
diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index d0d7e70fa284..f1007db45acc
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -779,7 +779,7 @@ You can add the arrays together with the plus sign. ::
 
     >>> data = np.array([1, 2])
-    >>> ones = np.ones(2, dtype=int)
+    >>> ones = np.ones(2, dtype=np.int_)
     >>> data + ones
     array([2, 3])
 
@@ -863,12 +863,13 @@ NumPy also performs aggregation functions. In addition to ``min``, ``max``, and
 result of multiplying the elements together, ``std`` to get the standard
 deviation, and more. ::
 
+    >>> data = np.array([1, 2, 3])
     >>> data.max()
-    2.0
+    3
     >>> data.min()
-    1.0
+    1
     >>> data.sum()
-    3.0
+    6
 
 .. image:: images/np_aggregation.png
 
@@ -1347,7 +1348,7 @@ For example::
 
     With a single iterable argument, return its biggest item.
    The default keyword-only argument specifies an object to
    return if the provided iterable is empty.
-    With two or more arguments, return the largest argument.
+    With two or more ...arguments, return the largest argument.
@@ -1740,4 +1741,5 @@ For directions regarding installing Matplotlib, see the official
 
 -------------------------------------------------------
 
-*Image credits: Jay Alammar https://jalammar.github.io/*
+*Image credits: Jay Alammar*
+`https://jalammar.github.io/ <https://jalammar.github.io/>`_
diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst
index ec373673d815..6d8e78488e7e
--- a/doc/source/user/basics.copies.rst
+++ b/doc/source/user/basics.copies.rst
@@ -123,8 +123,8 @@ shape attribute of the array. For example::
     AttributeError: Incompatible shape for in-place modification. Use
     `.reshape()` to make a copy with the desired shape.
 
-Taking the example of another operation, :func:`.ravel` returns a contiguous
-flattened view of the array wherever possible. On the other hand,
+Taking the example of another operation, :func:`numpy.ravel` returns a
+contiguous flattened view of the array wherever possible. On the other hand,
 :meth:`.ndarray.flatten` always returns a flattened copy of the array.
 However, to guarantee a view in most cases, ``x.reshape(-1)`` may be preferable.
 
diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst
index 1a7707ee69c9..19fa737d5f8d
--- a/doc/source/user/basics.creation.rst
+++ b/doc/source/user/basics.creation.rst
@@ -20,7 +20,7 @@ There are 6 general mechanisms for creating arrays:
 6) Use of special library functions (e.g., random)
 
 You can use these methods to create ndarrays or :ref:`structured_arrays`.
-This document will cover general methods for ndarray creation. 
+This document will cover general methods for ndarray creation.
 
 1) Converting Python sequences to NumPy arrays
 ==============================================
@@ -29,8 +29,8 @@ NumPy arrays can be defined using Python sequences such as lists and
 tuples. Lists and tuples are defined using ``[...]`` and ``(...)``,
 respectively. Lists and tuples can define ndarray creation:
 
-* a list of numbers will create a 1D array, 
-* a list of lists will create a 2D array, 
+* a list of numbers will create a 1D array,
+* a list of lists will create a 2D array,
 * further nested lists will create higher-dimensional arrays.
 
 In general, any array object is called an **ndarray** in NumPy. ::
@@ -72,7 +72,7 @@ results, for example::
 Notice when you perform operations with two arrays of the same
 ``dtype``: ``uint32``, the resulting array is the same type. When you
-perform operations with different ``dtype``, NumPy will 
+perform operations with different ``dtype``, NumPy will
 assign a new type that satisfies all of the array elements involved in
 the computation; here ``uint32`` and ``int32`` can both be represented
 as ``int64``.
@@ -88,7 +88,7 @@ you create the array.
 
 .. 40 functions seems like a small number, but the routines.array-creation
-   has ~47. I'm sure there are more. 
+   has ~47. I'm sure there are more.
 
 NumPy has over 40 built-in functions for creating arrays as laid out in
 the :ref:`Array creation routines <routines.array-creation>`.
@@ -104,7 +104,7 @@ dimension of the array they create:
 
 The 1D array creation functions e.g. :func:`numpy.linspace` and
 :func:`numpy.arange` generally need at least two inputs, ``start`` and
-``stop``. 
+``stop``.
 
 :func:`numpy.arange` creates arrays with regularly incrementing values.
 Check the documentation for complete information and examples.
A few @@ -113,7 +113,7 @@ examples are shown:: >>> import numpy as np >>> np.arange(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=float) + >>> np.arange(2, 10, dtype=np.float64) array([2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.arange(2, 3, 0.1) array([2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) @@ -121,8 +121,8 @@ examples are shown:: Note: best practice for :func:`numpy.arange` is to use integer start, end, and step values. There are some subtleties regarding ``dtype``. In the second example, the ``dtype`` is defined. In the third example, the array is -``dtype=float`` to accommodate the step size of ``0.1``. Due to roundoff error, -the ``stop`` value is sometimes included. +``dtype=np.float64`` to accommodate the step size of ``0.1``. Due to roundoff error, +the ``stop`` value is sometimes included. :func:`numpy.linspace` will create arrays with a specified number of elements, and spaced equally between the specified beginning and end values. For @@ -140,7 +140,7 @@ number of elements and the starting and end point. The previous ------------------------------- The 2D array creation functions e.g. :func:`numpy.eye`, :func:`numpy.diag`, and :func:`numpy.vander` -define properties of special matrices represented as 2D arrays. +define properties of special matrices represented as 2D arrays. ``np.eye(n, m)`` defines a 2D identity matrix. The elements where i=j (row index and column index are equal) are 1 and the rest are 0, as such:: @@ -159,7 +159,7 @@ and the rest are 0, as such:: the diagonal *or* if given a 2D array returns a 1D array that is only the diagonal elements. The two array creation functions can be helpful while doing linear algebra, as such:: - + >>> import numpy as np >>> np.diag([1, 2, 3]) array([[1, 0, 0], @@ -197,7 +197,7 @@ routine is helpful in generating linear least squares models, as such:: [ 8, 4, 2, 1], [27, 9, 3, 1], [64, 16, 4, 1]]) - + 3 - general ndarray creation functions -------------------------------------- @@ -205,20 +205,20 @@ The ndarray creation functions e.g. :func:`numpy.ones`, :func:`numpy.zeros`, and :meth:`~numpy.random.Generator.random` define arrays based upon the desired shape. The ndarray creation functions can create arrays with any dimension by specifying how many dimensions -and length along that dimension in a tuple or list. +and length along that dimension in a tuple or list. :func:`numpy.zeros` will create an array filled with 0 values with the specified shape. The default dtype is ``float64``:: >>> import numpy as np >>> np.zeros((2, 3)) - array([[0., 0., 0.], + array([[0., 0., 0.], [0., 0., 0.]]) >>> np.zeros((2, 3, 2)) array([[[0., 0.], [0., 0.], [0., 0.]], - + [[0., 0.], [0., 0.], [0., 0.]]]) @@ -228,7 +228,7 @@ specified shape. The default dtype is ``float64``:: >>> import numpy as np >>> np.ones((2, 3)) - array([[1., 1., 1.], + array([[1., 1., 1.], [1., 1., 1.]]) >>> np.ones((2, 3, 2)) array([[[1., 1.], @@ -265,11 +265,11 @@ dimension:: >>> import numpy as np >>> np.indices((3,3)) - array([[[0, 0, 0], - [1, 1, 1], - [2, 2, 2]], - [[0, 1, 2], - [0, 1, 2], + array([[[0, 0, 0], + [1, 1, 1], + [2, 2, 2]], + [[0, 1, 2], + [0, 1, 2], [0, 1, 2]]]) This is particularly useful for evaluating functions of multiple dimensions on @@ -322,7 +322,7 @@ arrays into a 4-by-4 array using ``block``:: [ 0., 0., 0., -4.]]) Other routines use similar syntax to join ndarrays. Check the -routine's documentation for further examples and syntax. +routine's documentation for further examples and syntax. 
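+
+For instance, ``numpy.vstack`` and ``numpy.hstack`` follow the same pattern,
+taking a sequence of arrays to join; a brief illustration (see each
+routine's documentation for the full set of options)::
+
+    >>> import numpy as np
+    >>> a = np.array([1, 2])
+    >>> b = np.array([3, 4])
+    >>> np.vstack([a, b])  # stack 1D arrays as rows of a 2D array
+    array([[1, 2],
+           [3, 4]])
+    >>> np.hstack([a, b])  # concatenate 1D arrays end-to-end
+    array([1, 2, 3, 4])
+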
 4) Reading arrays from disk, either from standard or custom formats
 ===================================================================
 
@@ -330,7 +330,7 @@ routine's documentation for further examples and syntax.
 This is the most common case of large array creation. The details depend
 greatly on the format of data on disk. This section gives general pointers on
 how to handle various formats. For more detailed examples of IO look at
-:ref:`How to Read and Write files <how-to-io>`. 
+:ref:`How to Read and Write files <how-to-io>`.
 
 Standard binary formats
 -----------------------
@@ -397,4 +397,4 @@ knowledge to interface with C or C++.
 NumPy is the fundamental library for array containers in the Python Scientific
 Computing stack. Many Python libraries, including SciPy, Pandas, and OpenCV, use
 NumPy ndarrays as the common format for data exchange. These libraries can create,
-operate on, and work with NumPy arrays. 
+operate on, and work with NumPy arrays.
diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst
index ae53995a3917..117d60f85467
--- a/doc/source/user/basics.dispatch.rst
+++ b/doc/source/user/basics.dispatch.rst
@@ -46,6 +46,21 @@ array([[1., 0., 0., 0., 0.],
        [0., 0., 0., 1., 0.],
        [0., 0., 0., 0., 1.]])
 
+The ``__array__`` method can optionally accept a `dtype` argument. If provided,
+this argument specifies the desired data type for the resulting NumPy array.
+Your implementation should attempt to convert the data to this `dtype`
+if possible. If the conversion is not supported, it's generally best
+to fall back to a default type or raise a `TypeError` or `ValueError`.
+
+Here's an example demonstrating its use with `dtype` specification:
+
+>>> np.asarray(arr, dtype=np.float32)
+array([[1., 0., 0., 0., 0.],
+       [0., 1., 0., 0., 0.],
+       [0., 0., 1., 0., 0.],
+       [0., 0., 0., 1., 0.],
+       [0., 0., 0., 0., 1.]], dtype=float32)
+
 If we operate on ``arr`` with a numpy function, numpy will again use the
 ``__array__`` interface to convert it to an array and then apply the function
 in the usual way.
diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst
index ca0c39d7081f..b1c115ff1de0
--- a/doc/source/user/basics.interoperability.rst
+++ b/doc/source/user/basics.interoperability.rst
@@ -65,6 +65,18 @@ and outputs a NumPy ndarray (which is generally a view of the input object's dat
 buffer). The :ref:`dlpack:python-spec` page explains the ``__dlpack__`` protocol
 in detail.
 
+``dtype`` interoperability
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+Similar to ``__array__()`` for array objects, defining ``__numpy_dtype__``
+allows a custom dtype object to be interoperable with NumPy.
+``__numpy_dtype__`` must return a NumPy dtype instance (note that
+``np.float64`` is not a dtype instance, ``np.dtype(np.float64)`` is).
+
+.. versionadded:: 2.4
+   Before NumPy 2.4 a ``.dtype`` attribute was treated similarly. As of NumPy 2.4
+   both are accepted, and implementing ``__numpy_dtype__`` prevents ``.dtype``
+   from being checked.
+
 The array interface protocol
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index d5b6bba8f28d..2a1523ba209b
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -201,16 +201,16 @@ The main way to control how the sequences of strings we have read from the file
 are converted to other types is to set the ``dtype`` argument.
 Acceptable values for this argument are:
 
-* a single type, such as ``dtype=float``.
+* a single type, such as ``dtype=np.float64``.
   The output will be 2D with the given dtype, unless a name has been
  associated with each column with the use of the ``names`` argument
-  (see below). Note that ``dtype=float`` is the default for
+  (see below). Note that ``dtype=np.float64`` is the default for
   :func:`~numpy.genfromtxt`.
-* a sequence of types, such as ``dtype=(int, float, float)``.
+* a sequence of types, such as ``dtype=(np.int_, np.float64, np.float64)``.
 * a comma-separated string, such as ``dtype="i4,f8,|U3"``.
 * a dictionary with two keys ``'names'`` and ``'formats'``.
 * a sequence of tuples ``(name, type)``, such as
-  ``dtype=[('A', int), ('B', float)]``.
+  ``dtype=[('A', np.int_), ('B', np.float64)]``.
 * an existing :class:`numpy.dtype` object.
 * the special value ``None``.
   In that case, the type of the columns will be determined from the data
@@ -243,7 +243,7 @@ each column. A first possibility is to use an explicit structured dtype,
 as mentioned previously::
 
    >>> data = StringIO("1 2 3\n 4 5 6")
-   >>> np.genfromtxt(data, dtype=[(_, int) for _ in "abc"])
+   >>> np.genfromtxt(data, dtype=[(_, np.int_) for _ in "abc"])
    array([(1, 2, 3), (4, 5, 6)],
          dtype=[('a', '<i8'), ('b', '<i8'), ('c', '<i8')])
 
   >>> data = StringIO("1 2 3\n 4 5 6")
-   >>> ndtype=[('a',int), ('b', float), ('c', int)]
+   >>> ndtype=[('a', np.int_), ('b', np.float64), ('c', np.int_)]
    >>> names = ["A", "B", "C"]
    >>> np.genfromtxt(data, names=names, dtype=ndtype)
    array([(1, 2., 3), (4, 5., 6)],
@@ -289,7 +289,7 @@ with the standard NumPy default of ``"f%i"``, yielding names like
 ``f0``, ``f1`` and so forth::
 
    >>> data = StringIO("1 2 3\n 4 5 6")
-   >>> np.genfromtxt(data, dtype=(int, float, int))
+   >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_))
    array([(1, 2., 3), (4, 5., 6)],
         dtype=[('f0', '<i8'), ('f1', '<f8'), ('f2', '<i8')])
 
   >>> data = StringIO("1 2 3\n 4 5 6")
-   >>> np.genfromtxt(data, dtype=(int, float, int), names="a")
+   >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), names="a")
    array([(1, 2., 3), (4, 5., 6)],
         dtype=[('a', '<i8'), ('f0', '<f8'), ('f1', '<i8')])
 
   >>> data = StringIO("1 2 3\n 4 5 6")
-   >>> np.genfromtxt(data, dtype=(int, float, int), defaultfmt="var_%02i")
+   >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), defaultfmt="var_%02i")
    array([(1, 2., 3), (4, 5., 6)],
         dtype=[('var_00', '<i8'), ('var_01', '<f8'), ('var_02', '<i8')])
 
   >>> data = "N/A, 2, 3\n4, ,???"
    >>> kwargs = dict(delimiter=",",
-   ...               dtype=int,
+   ...               dtype=np.int_,
    ...               names="a,b,c",
    ...               missing_values={0:"N/A", 'b':" ", 2:"???"},
    ...               
filling_values={0:0, 'b':0, 2:-999}) diff --git a/doc/source/user/basics.strings.rst b/doc/source/user/basics.strings.rst index 460bc1fe589f..cbbaa8f6e3b3 100644 --- a/doc/source/user/basics.strings.rst +++ b/doc/source/user/basics.strings.rst @@ -109,7 +109,7 @@ that empty strings are used to populate empty arrays: >>> np.empty(3, dtype=StringDType()) array(['', '', ''], dtype=StringDType()) -Optionally, you can pass create an instance of ``StringDType`` with +Optionally, you can create an instance of ``StringDType`` with support for missing values by passing ``na_object`` as a keyword argument for the initializer: diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 7b1e8fd34512..202561a958a8 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -346,7 +346,7 @@ Simple example - adding an extra attribute to ndarray class InfoArray(np.ndarray): - def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + def __new__(subtype, shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard @@ -469,7 +469,7 @@ implemented. The signature of ``__array_ufunc__`` is:: - def __array_ufunc__(ufunc, method, *inputs, **kwargs): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - *ufunc* is the ufunc object that was called. - *method* is a string indicating how the Ufunc was called, either @@ -567,7 +567,7 @@ which inputs and outputs it converted. Hence, e.g., Note that another approach would be to use ``getattr(ufunc, methods)(*inputs, **kwargs)`` instead of the ``super`` call. For this example, the result would be identical, but there is a difference if another operand -also defines ``__array_ufunc__``. E.g., lets assume that we evaluate +also defines ``__array_ufunc__``. E.g., let's assume that we evaluate ``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has an override. If you use ``super`` as in the example, ``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which @@ -779,5 +779,3 @@ your function's signature should accept ``**kwargs``. For example: This object is now compatible with ``np.sum`` again because any extraneous arguments (i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the ``**unused_kwargs`` parameter. - - diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index a605d32fcd51..d6914f437faa 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -35,18 +35,6 @@ See :ref:`arrays.dtypes.constructing` for more information about specifying and constructing data type objects, including how to specify parameters like the byte order. -To convert the type of an array, use the .astype() method. For example: :: - - >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE - array([0., 1., 2.]) - -Note that, above, we could have used the *Python* float object as a dtype -instead of `numpy.float64`. NumPy knows that -:class:`int` refers to `numpy.int_`, :class:`bool` means -`numpy.bool`, that :class:`float` is `numpy.float64` and -:class:`complex` is `numpy.complex128`. The other data-types do not have -Python equivalents. 
-
 To determine the type of an array, look at the dtype attribute::
 
     >>> z.dtype
@@ -66,6 +54,28 @@ properties of the type, such as whether it is an integer::
     >>> np.issubdtype(d, np.floating)
     False
 
+To convert the type of an array, use the .astype() method. For example::
+
+    >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE
+    array([0., 1., 2.])
+
+Note that, above, we could have used the *Python* float object as a dtype
+instead of `numpy.float64`. NumPy knows that
+:class:`int` refers to `numpy.int_`, :class:`bool` means
+`numpy.bool`, that :class:`float` is `numpy.float64` and
+:class:`complex` is `numpy.complex128`. The other data-types do not have
+Python equivalents.
+
+Sometimes the conversion can overflow, for instance when converting a `numpy.int64` value
+300 to `numpy.int8`. NumPy follows C casting rules, so that value would overflow and
+become 44 ``(300 - 256)``. If you wish to avoid such overflows, you can require that the
+conversion fail on overflow by using ``same_value`` for the ``casting`` argument (see also
+:ref:`overflow-errors`)::
+
+    >>> z.astype(np.float64, casting="same_value") #doctest: +NORMALIZE_WHITESPACE
+    array([0., 1., 2.])
+
+
 Numerical Data Types
 --------------------
 
@@ -217,7 +227,7 @@ confusion with builtin python type names, such as `numpy.bool_`.
   * - N/A
     - ``'P'``
     - ``uintptr_t``
-    - Guaranteed to hold pointers. Character code only (Python and C).
+    - Guaranteed to hold pointers without sign. Character code only (Python and C).
 
   * - `numpy.int32` or `numpy.int64`
     - `numpy.long`
diff --git a/doc/source/user/basics.ufuncs.rst b/doc/source/user/basics.ufuncs.rst
index 773fe86c21d2..5c91ab6c0168
--- a/doc/source/user/basics.ufuncs.rst
+++ b/doc/source/user/basics.ufuncs.rst
@@ -18,6 +18,25 @@ is, a ufunc is a ":term:`vectorized <vectorization>`" wrapper for a function that
 takes a fixed number of specific inputs and produces a fixed number of
 specific outputs.
 
+There are also :ref:`generalized ufuncs ` which
+are functions over vectors (or arrays) instead of single-element scalars.
+For example, :func:`numpy.add` is a ufunc that operates element-by-element,
+while :func:`numpy.matmul` is a gufunc that operates on vectors/matrices::
+
+    >>> a = np.arange(6).reshape(3, 2)
+    >>> a
+    array([[0, 1],
+           [2, 3],
+           [4, 5]])
+    >>> np.add(a, a)  # element-wise addition
+    array([[ 0,  2],
+           [ 4,  6],
+           [ 8, 10]])
+    >>> np.matmul(a, a.T)  # matrix multiplication (3x2) @ (2x3) -> (3x3)
+    array([[ 1,  3,  5],
+           [ 3, 13, 23],
+           [ 5, 23, 41]])
+
 In NumPy, universal functions are instances of the
 :class:`numpy.ufunc` class. Many of the built-in functions are
 implemented in compiled C code. The basic ufuncs operate on scalars, but
@@ -35,12 +54,30 @@ One can also produce custom :class:`numpy.ufunc` instances using the
 Ufunc methods
 =============
 
-All ufuncs have four methods. They can be found at
-:ref:`ufuncs.methods`. However, these methods only make sense on scalar
-ufuncs that take two input arguments and return one output argument.
+All ufuncs have five methods: four reduce-like methods
+(:meth:`~numpy.ufunc.reduce`, :meth:`~numpy.ufunc.accumulate`,
+:meth:`~numpy.ufunc.reduceat`, :meth:`~numpy.ufunc.outer`) and one
+for in-place operations (:meth:`~numpy.ufunc.at`).
+See :ref:`ufuncs.methods` for more. However, these methods only make sense on
+ufuncs that take two input arguments and return one output argument (so-called
+"scalar" ufuncs, since the inner loop operates on a single scalar value).
 Attempting to call these methods on other ufuncs will cause a
 :exc:`ValueError`.
+For example, :func:`numpy.add` takes two inputs and returns one output, +so its methods work:: + + >>> np.add.reduce([1, 2, 3]) + 6 + +But :func:`numpy.divmod` returns two outputs (quotient and remainder), +so calling its methods raises an error:: + + >>> np.divmod.reduce([1, 2, 3]) + Traceback (most recent call last): + ... + ValueError: reduce only supported for functions returning a single value + The reduce-like methods all take an *axis* keyword, a *dtype* keyword, and an *out* keyword, and the arrays must all have dimension >= 1. The *axis* keyword specifies the axis of the array over which the reduction @@ -76,7 +113,7 @@ an integer (or Boolean) data-type and smaller than the size of the >>> x.dtype dtype('int64') - >>> np.multiply.reduce(x, dtype=float) + >>> np.multiply.reduce(x, dtype=np.float64) array([ 0., 28., 80.]) Finally, the *out* keyword allows you to @@ -84,10 +121,10 @@ provide an output array (or a tuple of output arrays for multi-output ufuncs). If *out* is given, the *dtype* argument is only used for the internal computations. Considering ``x`` from the previous example:: - >>> y = np.zeros(3, dtype=int) + >>> y = np.zeros(3, dtype=np.int_) >>> y array([0, 0, 0]) - >>> np.multiply.reduce(x, dtype=float, out=y) + >>> np.multiply.reduce(x, dtype=np.float64, out=y) array([ 0, 28, 80]) Ufuncs also have a fifth method, :func:`numpy.ufunc.at`, that allows in place diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 7bf793ae2e47..eadeafe51e8e 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -268,7 +268,7 @@ specifies your data-type. This type number should be stored and made available by your module so that other modules can use it to recognize your data-type. -Note that this API is inherently thread-unsafe. See `thread_safety` for more +Note that this API is inherently thread-unsafe. See :ref:`thread_safety` for more details about thread safety in NumPy. diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index c699760fdebd..20d3f1bb5937 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -178,13 +178,7 @@ write in a ``setup.py`` file: Adding the NumPy include directory is, of course, only necessary if you are using NumPy arrays in the extension module (which is what we -assume you are using Cython for). The distutils extensions in NumPy -also include support for automatically producing the extension-module -and linking it from a ``.pyx`` file. It works so that if the user does -not have Cython installed, then it looks for a file with the same -file-name but a ``.c`` extension which it then uses instead of trying -to produce the ``.c`` file again. - +assume you are using Cython for). If you just use Cython to compile a standard Python module, then you will get a C extension module that typically runs a bit faster than the equivalent Python module. Further speed increases can be gained by using diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 6b1aca65ed00..e5773f8232b8 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -74,9 +74,9 @@ For comparison and general edification of the reader we provide a simple implementation of a C extension of ``logit`` that uses no numpy. -To do this we need two files. 
The first is the C file which contains -the actual code, and the second is the ``setup.py`` file used to create -the module. +To do this we need three files. The first is the C file which contains +the actual code, and the others are two project files that describe +how to create the module. .. code-block:: c @@ -157,65 +157,91 @@ the module. return m; } -To use the ``setup.py file``, place ``setup.py`` and ``spammodule.c`` -in the same folder. Then ``python setup.py build`` will build the module to -import, or ``python setup.py install`` will install the module to your -site-packages directory. +To create the module, one proceeds as one would for a Python package, creating +a ``pyproject.toml`` file, which defines a build back-end, and then another +file for that backend which describes how to compile the code. For the backend, +we recommend ``meson-python``, as we use it for numpy itself, but below we +also show how to use the older ``setuptools``. - .. code-block:: python +.. tab-set:: - ''' - setup.py file for spammodule.c + .. tab-item:: meson - Calling - $python setup.py build_ext --inplace - will build the extension library in the current file. + Sample ``pyproject.toml`` and ``meson.build``. - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so + .. code-block:: toml - Calling - $python setup.py install - will install the module in your site-packages file. + [project] + name = "spam" + version = "0.1" - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' + [build-system] + requires = ["meson-python"] + build-backend = "mesonpy" - from setuptools import setup, Extension - import numpy as np + .. code-block:: meson - module1 = Extension('spam', sources=['spammodule.c']) + project('spam', 'c') - setup(name='spam', version='1.0', ext_modules=[module1]) + py = import('python').find_installation() + sources = files('spammodule.c') -Once the spam module is imported into python, you can call logit + extension_module = py.extension_module( + 'spam', + sources, + install: true, + ) + + .. tab-item:: setuptools + + Sample ``pyproject.toml`` and ``setup.py``. + + .. code-block:: toml + + [project] + name = "spam" + version = "0.1" + + [build-system] + requires = ["setuptools"] + build-backend = "setuptools.build_meta" + + .. code-block:: python + + from setuptools import setup, Extension + + spammodule = Extension('spam', sources=['spammodule.c']) + + setup(name='spam', version='1.0', + ext_modules=[spammodule]) + +With either of the above, one can build and install the ``spam`` package with, + +.. code-block:: bash + + pip install . + +Once the ``spam`` module is imported into python, you can call logit via ``spam.logit``. Note that the function used above cannot be applied as-is to numpy arrays. To do so we must call :py:func:`numpy.vectorize` -on it. 
For example, if a python interpreter is opened in the file containing -the spam library or spam has been installed, one can perform the -following commands: - ->>> import numpy as np ->>> import spam ->>> spam.logit(0) --inf ->>> spam.logit(1) -inf ->>> spam.logit(0.5) -0.0 ->>> x = np.linspace(0,1,10) ->>> spam.logit(x) -TypeError: only length-1 arrays can be converted to Python scalars ->>> f = np.vectorize(spam.logit) ->>> f(x) -array([ -inf, -2.07944154, -1.25276297, -0.69314718, -0.22314355, - 0.22314355, 0.69314718, 1.25276297, 2.07944154, inf]) +on it. For example:: + + >>> import numpy as np + >>> import spam + >>> spam.logit(0) + -inf + >>> spam.logit(1) + inf + >>> spam.logit(0.5) + 0.0 + >>> x = np.linspace(0,1,10) + >>> spam.logit(x) + TypeError: only length-1 arrays can be converted to Python scalars + >>> f = np.vectorize(spam.logit) + >>> f(x) + array([ -inf, -2.07944154, -1.25276297, -0.69314718, -0.22314355, + 0.22314355, 0.69314718, 1.25276297, 2.07944154, inf]) THE RESULTING LOGIT FUNCTION IS NOT FAST! ``numpy.vectorize`` simply loops over ``spam.logit``. The loop is done at the C level, but the numpy @@ -236,12 +262,11 @@ Example NumPy ufunc for one dtype For simplicity we give a ufunc for a single dtype, the ``'f8'`` ``double``. As in the previous section, we first give the ``.c`` file -and then the ``setup.py`` file used to create the module containing the -ufunc. +and then the files used to create a ``npufunc`` module containing the ufunc. The place in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines is +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. .. code-block:: c @@ -339,59 +364,77 @@ the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py file`` for the above code. As before, the module -can be build via calling ``python setup.py build`` at the command prompt, -or installed to site-packages via ``python setup.py install``. The module -can also be placed into a local folder e.g. ``npufunc_directory`` below -using ``python setup.py build_ext --inplace``. +For the files needed to create the module, the main difference from our +previous example is that we now need to declare dependencies on numpy. - .. code-block:: python +.. tab-set:: - ''' - setup.py file for single_type_logit.c - Note that since this is a numpy extension - we add an include_dirs=[get_include()] so that the - extension is built with numpy's C/C++ header files. + .. tab-item:: meson - Calling - $python setup.py build_ext --inplace - will build the extension library in the npufunc_directory. + Sample ``pyproject.toml`` and ``meson.build``. - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so + .. code-block:: toml - Calling - $python setup.py install - will install the module in your site-packages file. + [project] + name = "npufunc" + dependencies = ["numpy"] + version = "0.1" - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. 
- ''' + [build-system] + requires = ["meson-python", "numpy"] + build-backend = "mesonpy" - from setuptools import setup, Extension - from numpy import get_include + .. code-block:: meson - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) + project('npufunc', 'c') - setup(name='npufunc', version='1.0', ext_modules=[npufunc]) + py = import('python').find_installation() + np_dep = dependency('numpy') + sources = files('single_type_logit.c') -After the above has been installed, it can be imported and used as follows. + extension_module = py.extension_module( + 'npufunc', + sources, + dependencies: [np_dep], + install: true, + ) ->>> import numpy as np ->>> import npufunc ->>> npufunc.logit(0.5) -np.float64(0.0) ->>> a = np.linspace(0,1,5) ->>> npufunc.logit(a) -array([ -inf, -1.09861229, 0. , 1.09861229, inf]) + .. tab-item:: setuptools + + Sample ``pyproject.toml`` and ``setup.py``. + + .. code-block:: toml + + [project] + name = "npufunc" + dependencies = ["numpy"] + version = "0.1" + + [build-system] + requires = ["setuptools", "numpy"] + build-backend = "setuptools.build_meta" + + .. code-block:: python + + from setuptools import setup, Extension + from numpy import get_include + + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) + + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) +After the above has been installed, it can be imported and used as follows:: + + >>> import numpy as np + >>> import npufunc + >>> npufunc.logit(0.5) + np.float64(0.0) + >>> a = np.linspace(0, 1, 5) + >>> npufunc.logit(a) + array([ -inf, -1.09861229, 0. , 1.09861229, inf]) .. _`sec:NumPy-many-loop`: @@ -402,14 +445,14 @@ Example NumPy ufunc with multiple dtypes .. index:: pair: ufunc; adding new -We finally give an example of a full ufunc, with inner loops for -half-floats, floats, doubles, and long doubles. As in the previous -sections we first give the ``.c`` file and then the corresponding -``setup.py`` file. +We now extend the above to a full ``logit`` ufunc, with inner loops for +floats, doubles, and long doubles. Here, we can use the same build files +as above, except we need to change the source file from ``single_type_logit.c`` +to ``multi_type_logit.c``. The places in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. @@ -419,7 +462,6 @@ is the primary thing that must be changed to create your own ufunc. #include #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" - #include "numpy/halffloat.h" #include /* @@ -514,39 +556,13 @@ is the primary thing that must be changed to create your own ufunc. 
 }
 
-    static void half_float_logit(char **args, const npy_intp *dimensions,
-                                 const npy_intp *steps, void *data)
-    {
-        npy_intp i;
-        npy_intp n = dimensions[0];
-        char *in = args[0], *out = args[1];
-        npy_intp in_step = steps[0], out_step = steps[1];
-
-        float tmp;
-
-        for (i = 0; i < n; i++) {
-
-            /* BEGIN main ufunc computation */
-            tmp = npy_half_to_float(*(npy_half *)in);
-            tmp /= 1 - tmp;
-            tmp = logf(tmp);
-            *((npy_half *)out) = npy_float_to_half(tmp);
-            /* END main ufunc computation */
-
-            in += in_step;
-            out += out_step;
-        }
-    }
-
     /*This gives pointers to the above functions*/
-    PyUFuncGenericFunction funcs[4] = {&half_float_logit,
-                                       &float_logit,
+    PyUFuncGenericFunction funcs[3] = {&float_logit,
                                        &double_logit,
                                        &long_double_logit};
 
-    static const char types[8] = {NPY_HALF, NPY_HALF,
-                                  NPY_FLOAT, NPY_FLOAT,
+    static const char types[6] = {NPY_FLOAT, NPY_FLOAT,
                                   NPY_DOUBLE, NPY_DOUBLE,
                                   NPY_LONGDOUBLE, NPY_LONGDOUBLE};
 
@@ -586,92 +602,40 @@ is the primary thing that must be changed to create your own ufunc.
         return m;
     }
 
-This is a ``setup.py`` file for the above code. As before, the module
-can be build via calling ``python setup.py build`` at the command prompt,
-or installed to site-packages via ``python setup.py install``.
-
-    .. code-block:: python
-
-        '''
-        setup.py file for multi_type_logit.c
-        Note that since this is a numpy extension
-        we add an include_dirs=[get_include()] so that the
-        extension is built with numpy's C/C++ header files.
-        Furthermore, we also have to include the npymath
-        lib for half-float d-type.
-
-        Calling
-        $python setup.py build_ext --inplace
-        will build the extension library in the current file.
-
-        Calling
-        $python setup.py build
-        will build a file that looks like ./build/lib*, where
-        lib* is a file that begins with lib. The library will
-        be in this file and end with a C library extension,
-        such as .so
-
-        Calling
-        $python setup.py install
-        will install the module in your site-packages file.
-
-        See the setuptools section 'Building Extension Modules'
-        at setuptools.pypa.io for more information.
-        '''
-
-        from setuptools import setup, Extension
-        from numpy import get_include
-        from os import path
-
-        path_to_npymath = path.join(get_include(), '..', 'lib')
-        npufunc = Extension('npufunc',
-                            sources=['multi_type_logit.c'],
-                            include_dirs=[get_include()],
-                            # Necessary for the half-float d-type.
-                            library_dirs=[path_to_npymath],
-                            libraries=["npymath"])
-
-        setup(name='npufunc', version='1.0', ext_modules=[npufunc])
-
-
 After the above has been installed, it can be imported and used as follows.
 
 >>> import numpy as np
 >>> import npufunc
 >>> npufunc.logit(0.5)
 np.float64(0.0)
->>> a = np.linspace(0,1,5)
+>>> a = np.linspace(0, 1, 5, dtype=np.float32)
 >>> npufunc.logit(a)
-array([      -inf, -1.09861229,  0.        ,  1.09861229,        inf])
+<stdin>:1: RuntimeWarning: divide by zero encountered in logit
+array([      -inf, -1.0986123,  0.       ,  1.0986123,        inf],
+      dtype=float32)
 
+.. note::
+    Supporting ``float16`` (half-precision) in custom ufuncs is more complex
+    due to its non-standard C representation and conversion requirements. The
+    above code can process ``float16`` input, but will do so by converting it
+    to ``float32``. The result will then be ``float32`` too, but one can
+    convert it back to ``float16`` by passing in a suitable output, as in
+    ``npufunc.logit(a, out=np.empty_like(a))``. For examples of actual
+    ``float16`` loops, see the numpy source code.

 .. 
_`sec:NumPy-many-arg`: Example NumPy ufunc with multiple arguments/return values ========================================================= -Our final example is a ufunc with multiple arguments. It is a modification -of the code for a logit ufunc for data with a single dtype. We -compute ``(A * B, logit(A * B))``. +Creating a ufunc with multiple arguments is not difficult. Here, we make a +modification of the code for a logit ufunc, where we compute ``(A * B, +logit(A * B))``. For simplicity, we only create a loop for doubles. -We only give the C code as the setup.py file is exactly the same as -the ``setup.py`` file in `Example NumPy ufunc for one dtype`_, except that -the line - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) - -is replaced with - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['multi_arg_logit.c'], - include_dirs=[get_include()]) +We again only give the C code as the files needed to create the module are the +same as before, but with the source file name replaced by +``multi_arg_logit.c``. The C file is given below. The ufunc generated takes two arguments ``A`` and ``B``. It returns a tuple whose first element is ``A * B`` and whose second @@ -684,7 +648,6 @@ as well as all other properties of a ufunc. #include #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" - #include "numpy/halffloat.h" #include /* @@ -786,29 +749,12 @@ Example NumPy ufunc with structured array dtype arguments This example shows how to create a ufunc for a structured array dtype. For the example we show a trivial ufunc for adding two arrays with dtype ``'u8,u8,u8'``. The process is a bit different from the other examples since -a call to :c:func:`PyUFunc_FromFuncAndData` doesn't fully register ufuncs for +a call to :c:func:`PyUFunc_FromFuncAndData` cannot register ufuncs for custom dtypes and structured array dtypes. We need to also call :c:func:`PyUFunc_RegisterLoopForDescr` to finish setting up the ufunc. -We only give the C code as the ``setup.py`` file is exactly the same as -the ``setup.py`` file in `Example NumPy ufunc for one dtype`_, except that -the line - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) - -is replaced with - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['add_triplet.c'], - include_dirs=[get_include()]) - -The C file is given below. +We only give the C code as the files needed to construct the module are again +exactly the same as before, except that the source file is now ``add_triplet.c``. .. code-block:: c @@ -865,15 +811,9 @@ The C file is given below. } } - /* This a pointer to the above function */ - PyUFuncGenericFunction funcs[1] = {&add_uint64_triplet}; - - /* These are the input and return dtypes of add_uint64_triplet. */ - static const char types[3] = {NPY_UINT64, NPY_UINT64, NPY_UINT64}; - static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, - "struct_ufunc_test", + "npufunc", NULL, -1, StructUfuncTestMethods, @@ -913,7 +853,7 @@ The C file is given below. dtypes[2] = dtype; /* Register ufunc for structured dtype */ - PyUFunc_RegisterLoopForDescr(add_triplet, + PyUFunc_RegisterLoopForDescr((PyUFuncObject *)add_triplet, dtype, &add_uint64_triplet, dtypes, @@ -926,37 +866,11 @@ The C file is given below. return m; } -.. index:: - pair: ufunc; adding new - -The returned ufunc object is a callable Python object. 
It should be
-placed in a (module) dictionary under the same name as was used in the
-name argument to the ufunc-creation routine. The following example is
-adapted from the umath module
-
-    .. code-block:: c
-
-        static PyUFuncGenericFunction atan2_functions[] = {
-            PyUFunc_ff_f, PyUFunc_dd_d,
-            PyUFunc_gg_g, PyUFunc_OO_O_method};
-        static void *atan2_data[] = {
-            (void *)atan2f, (void *)atan2,
-            (void *)atan2l, (void *)"arctan2"};
-        static const char atan2_signatures[] = {
-            NPY_FLOAT, NPY_FLOAT, NPY_FLOAT,
-            NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE,
-            NPY_LONGDOUBLE, NPY_LONGDOUBLE, NPY_LONGDOUBLE
-            NPY_OBJECT, NPY_OBJECT, NPY_OBJECT};
-        ...
-        /* in the module initialization code */
-        PyObject *f, *dict, *module;
-        ...
-        dict = PyModule_GetDict(module);
-        ...
-        f = PyUFunc_FromFuncAndData(atan2_functions,
-            atan2_data, atan2_signatures, 4, 2, 1,
-            PyUFunc_None, "arctan2",
-            "a safe and correct arctan(x1/x2)", 0);
-        PyDict_SetItemString(dict, "arctan2", f);
-        Py_DECREF(f);
-        ...
+Sample usage::
 
+    >>> import npufunc
+    >>> import numpy as np
+    >>> a = np.array([(1, 2, 3), (4, 5, 6)], "u8,u8,u8")
+    >>> npufunc.add_triplet(a, a)
+    array([(2, 4, 6), (8, 10, 12)],
+          dtype=[('f0', '<u8'), ('f1', '<u8'), ('f2', '<u8')])
diff --git a/doc/source/user/how-to-print.rst b/doc/source/user/how-to-print.rst
new file mode 100644
--- /dev/null
+++ b/doc/source/user/how-to-print.rst
+    >>> np.set_printoptions(precision=2)
+    >>> np.get_printoptions()
+    {'edgeitems': 3, 'threshold': 1000, 'floatmode': 'maxprec', 'precision': 2, 'suppress': False, 'linewidth': 75, 'nanstr': 'nan', 'infstr': 'inf', 'sign': '-', 'formatter': None, 'legacy': False, 'override_repr': None}
+
+To restore the default settings, use:
+
+    >>> np.set_printoptions(edgeitems=3, infstr='inf',
+    ...                     linewidth=75, nanstr='nan', precision=8,
+    ...                     suppress=False, threshold=1000, formatter=None)
+
+
+Applying settings temporarily
+-----------------------------
+
+Use :func:`numpy.printoptions` as a context manager to temporarily override print settings within a specific scope:
+
+    >>> arr = np.array([0.155, 0.184, 0.173])
+    >>> with np.printoptions(precision=2):
+    ...     print(arr)
+    [0.15 0.18 0.17]
+
+All keywords that apply to :func:`numpy.set_printoptions` also apply to :func:`numpy.printoptions`.
+
+
+Changing the number of digits of precision
+==========================================
+
+The default number of fractional digits displayed is 8. You can change this number using the ``precision`` keyword.
+
+    >>> arr = np.array([0.1, 0.184, 0.17322])
+    >>> with np.printoptions(precision=2):
+    ...     print(arr)
+    [0.1 0.18 0.17]
+
+The ``floatmode`` option determines how the ``precision`` setting is interpreted.
+By default, ``floatmode=maxprec_equal`` displays values with the minimal number of digits needed to uniquely represent them,
+using the same number of digits across all elements.
+If you want to show exactly the number of digits specified by ``precision``, use ``floatmode=fixed``:
+
+    >>> arr = np.array([0.1, 0.184, 0.173], dtype=np.float32)
+    >>> with np.printoptions(precision=2, floatmode="fixed"):
+    ...     print(arr)
+    [0.10 0.18 0.17]
+
+
+Changing how `nan` and `inf` are displayed
+==========================================
+
+By default, `numpy.nan` is displayed as `nan` and `numpy.inf` is displayed as `inf`.
+You can override these representations using the ``nanstr`` and ``infstr`` options:
+
+    >>> arr = np.array([np.inf, np.nan, 0])
+    >>> with np.printoptions(nanstr="NAN", infstr="INF"):
+    ...     print(arr)
+    [INF NAN 0.]
+
+
+Controlling scientific notation
+================================
+
+By default, NumPy uses scientific notation when:
+
+- The absolute value of the smallest number is less than ``1e-4``, or
+- The ratio of the largest to the smallest absolute value is greater than ``1e3``
+
+    >>> arr = np.array([0.00002, 210000.0, 3.14])
+    >>> print(arr)
+    [2.00e-05 2.10e+05 3.14e+00]
+
+To suppress scientific notation and always use fixed-point notation, set ``suppress=True``:
+
+    >>> arr = np.array([0.00002, 210000.0, 3.14])
+    >>> with np.printoptions(suppress=True):
+    ...     print(arr)
+    [     0.00002 210000.            3.14   ]
+
+
+Applying custom formatting functions
+====================================
+
+You can apply custom formatting functions to specific or all data types using the ``formatter`` keyword.
+See :func:`numpy.set_printoptions` for more details on supported format keys.
+
+For example, to format `datetime64` values with a custom function:
+
+    >>> arr = np.array([np.datetime64("2025-01-01"), np.datetime64("2024-01-01")])
+    >>> with np.printoptions(formatter={"datetime": lambda x: f"(Year: {x.item().year}, Month: {x.item().month})"}):
+    ...     print(arr)
+    [(Year: 2025, Month: 1) (Year: 2024, Month: 1)]
+
diff --git a/doc/source/user/howtos_index.rst b/doc/source/user/howtos_index.rst
index ca30f7e9115d..a8a8229dd7dd
--- a/doc/source/user/howtos_index.rst
+++ b/doc/source/user/howtos_index.rst
@@ -16,3 +16,4 @@ the package, see the :ref:`API reference `.
    how-to-index
    how-to-verify-bug
    how-to-partition
+   how-to-print
diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst
index 6d652e3ca67f..a882afa37afd
--- a/doc/source/user/misc.rst
+++ b/doc/source/user/misc.rst
@@ -7,7 +7,7 @@ Miscellaneous
 IEEE 754 floating point special values
 --------------------------------------
 
-Special values defined in numpy: nan, inf,
+Special values defined in numpy: :data:`~numpy.nan`, :data:`~numpy.inf`.
 
 NaNs can be used as a poor-man's mask (if you don't care what
 the original value was)
@@ -17,29 +17,39 @@ Note: cannot use equality to test NaNs. E.g.: ::
 
     >>> myarr = np.array([1., 0., np.nan, 3.])
     >>> np.nonzero(myarr == np.nan)
     (array([], dtype=int64),)
+
+::
+
     >>> np.nan == np.nan  # is always False! Use special numpy functions instead.
     False
+
+::
+
     >>> myarr[myarr == np.nan] = 0. # doesn't work
     >>> myarr
     array([ 1.,  0., nan,  3.])
+
+::
+
     >>> myarr[np.isnan(myarr)] = 0. # use this instead
     >>> myarr
     array([1., 0., 0., 3.])
 
-Other related special value functions: ::
+Other related special value functions:
 
-    isinf():    True if value is inf
-    isfinite(): True if not nan or inf
-    nan_to_num(): Map nan to 0, inf to max float, -inf to min float
+- :func:`~numpy.isnan` - True if value is nan
+- :func:`~numpy.isinf` - True if value is inf
+- :func:`~numpy.isfinite` - True if not nan or inf
+- :func:`~numpy.nan_to_num` - Map nan to 0, inf to max float, -inf to min float
 
 The following corresponds to the usual functions except that nans are excluded
-from the results: ::
+from the results:
 
-    nansum()
-    nanmax()
-    nanmin()
-    nanargmax()
-    nanargmin()
+- :func:`~numpy.nansum`
+- :func:`~numpy.nanmax`
+- :func:`~numpy.nanmin`
+- :func:`~numpy.nanargmax`
+- :func:`~numpy.nanargmin`
 
 >>> x = np.arange(10.)
 >>> x[3] = np.nan
@@ -47,168 +57,3 @@ from the results: ::
 nan
 >>> np.nansum(x)
 42.0
-
-How numpy handles numerical exceptions
---------------------------------------
-
-The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
-and ``'ignore'`` for ``underflow``. 
But this can be changed, and it can be -set individually for different kinds of exceptions. The different behaviors -are: - - - 'ignore' : Take no action when the exception occurs. - - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module). - - 'raise' : Raise a `FloatingPointError`. - - 'call' : Call a function specified using the `seterrcall` function. - - 'print' : Print a warning directly to ``stdout``. - - 'log' : Record error in a Log object specified by `seterrcall`. - -These behaviors can be set for all kinds of errors or specific ones: - - - all : apply to all numeric exceptions - - invalid : when NaNs are generated - - divide : divide by zero (for integers as well!) - - overflow : floating point overflows - - underflow : floating point underflows - -Note that integer divide-by-zero is handled by the same machinery. -These behaviors are set on a per-thread basis. - -Examples --------- - -:: - - >>> oldsettings = np.seterr(all='warn') - >>> np.zeros(5,dtype=np.float32)/0. - Traceback (most recent call last): - ... - RuntimeWarning: invalid value encountered in divide - >>> j = np.seterr(under='ignore') - >>> np.array([1.e-100])**10 - array([0.]) - >>> j = np.seterr(invalid='raise') - >>> np.sqrt(np.array([-1.])) - Traceback (most recent call last): - ... - FloatingPointError: invalid value encountered in sqrt - >>> def errorhandler(errstr, errflag): - ... print("saw stupid error!") - >>> np.seterrcall(errorhandler) - >>> j = np.seterr(all='call') - >>> np.zeros(5, dtype=np.int32)/0 - saw stupid error! - array([nan, nan, nan, nan, nan]) - >>> j = np.seterr(**oldsettings) # restore previous - ... # error-handling settings - -Interfacing to C ----------------- -Only a survey of the choices. Little detail on how each works. - -1) Bare metal, wrap your own C-code manually. - - - Plusses: - - - Efficient - - No dependencies on other tools - - - Minuses: - - - Lots of learning overhead: - - - need to learn basics of Python C API - - need to learn basics of numpy C API - - need to learn how to handle reference counting and love it. - - - Reference counting often difficult to get right. - - - getting it wrong leads to memory leaks, and worse, segfaults - -2) Cython - - - Plusses: - - - avoid learning C API's - - no dealing with reference counting - - can code in pseudo python and generate C code - - can also interface to existing C code - - should shield you from changes to Python C api - - has become the de-facto standard within the scientific Python community - - fast indexing support for arrays - - - Minuses: - - - Can write code in non-standard form which may become obsolete - - Not as flexible as manual wrapping - -3) ctypes - - - Plusses: - - - part of Python standard library - - good for interfacing to existing shareable libraries, particularly - Windows DLLs - - avoids API/reference counting issues - - good numpy support: arrays have all these in their ctypes - attribute: :: - - a.ctypes.data - a.ctypes.data_as - a.ctypes.shape - a.ctypes.shape_as - a.ctypes.strides - a.ctypes.strides_as - - - Minuses: - - - can't use for writing code to be turned into C extensions, only a wrapper - tool. 
- -4) SWIG (automatic wrapper generator) - - - Plusses: - - - around a long time - - multiple scripting language support - - C++ support - - Good for wrapping large (many functions) existing C libraries - - - Minuses: - - - generates lots of code between Python and the C code - - can cause performance problems that are nearly impossible to optimize - out - - interface files can be hard to write - - doesn't necessarily avoid reference counting issues or needing to know - API's - -5) Psyco - - - Plusses: - - - Turns pure python into efficient machine code through jit-like - optimizations - - very fast when it optimizes well - - - Minuses: - - - Only on intel (windows?) - - Doesn't do much for numpy? - -Interfacing to Fortran: ------------------------ -The clear choice to wrap Fortran code is -`f2py `_. - -Pyfort is an older alternative, but not supported any longer. -Fwrap is a newer project that looked promising but isn't being developed any -longer. - -Interfacing to C++: -------------------- - 1) Cython - 2) CXX - 3) Boost.python - 4) SWIG - 5) SIP (used mainly in PyQT) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 9e8093b20f02..e05e123e224c 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -674,10 +674,10 @@ are only a handful of key differences between the two. - Operators ``*`` and ``@``, functions ``dot()``, and ``multiply()``: - - For ``array``, **``*`` means element-wise multiplication**, while - **``@`` means matrix multiplication**; they have associated functions + - For ``array``, ``*`` **means element-wise multiplication**, while + ``@`` **means matrix multiplication**; they have associated functions ``multiply()`` and ``dot()``. - - For ``matrix``, **``*`` means matrix multiplication**, and for + - For ``matrix``, ``*`` **means matrix multiplication**, and for element-wise multiplication one has to use the ``multiply()`` function. - Handling of vectors (one-dimensional arrays) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 3f97f005898b..1208bd1a6347 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -163,7 +163,7 @@ The type of the array can also be explicitly specified at creation time: :: - >>> c = np.array([[1, 2], [3, 4]], dtype=complex) + >>> c = np.array([[1, 2], [3, 4]], dtype=np.complex128) >>> c array([[1.+0.j, 2.+0.j], [3.+0.j, 4.+0.j]]) @@ -346,7 +346,7 @@ existing array rather than create a new one. :: >>> rg = np.random.default_rng(1) # create instance of default random number generator - >>> a = np.ones((2, 3), dtype=int) + >>> a = np.ones((2, 3), dtype=np.int_) >>> b = rg.random((2, 3)) >>> a *= 3 >>> a @@ -535,7 +535,7 @@ are given in a tuple separated by commas:: >>> def f(x, y): ... return 10 * x + y ... - >>> b = np.fromfunction(f, (5, 4), dtype=int) + >>> b = np.fromfunction(f, (5, 4), dtype=np.int_) >>> b array([[ 0, 1, 2, 3], [10, 11, 12, 13], @@ -1256,7 +1256,7 @@ set `__: ... A, B = np.meshgrid(x, y) ... C = A + B*1j ... z = np.zeros_like(C) - ... divtime = maxit + np.zeros(z.shape, dtype=int) + ... divtime = maxit + np.zeros(z.shape, dtype=np.int_) ... ... for i in range(maxit): ... 
z = z**2 + C @@ -1479,4 +1479,4 @@ Further reading - `SciPy Tutorial `__ - `SciPy Lecture Notes `__ - A `matlab, R, IDL, NumPy/SciPy dictionary `__ -- :doc:`tutorial-svd ` +- :doc:`tutorial-svd ` diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index 6be8831d9c2a..68ac4f939525 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -83,28 +83,6 @@ on how to properly configure Eclipse/PyDev to use Anaconda Python with specific conda environments. -Raspberry Pi ------------- - -There are sometimes issues reported on Raspberry Pi setups when installing -using ``pip3 install`` (or ``pip`` install). These will typically mention:: - - libf77blas.so.3: cannot open shared object file: No such file or directory - - -The solution will be to either:: - - sudo apt-get install libatlas-base-dev - -to install the missing libraries expected by the self-compiled NumPy -(ATLAS is a possible provider of linear algebra). - -*Alternatively* use the NumPy provided by Raspbian. In which case run:: - - pip3 uninstall numpy # remove previously installed version - apt install python3-numpy - - Debug build on Windows ---------------------- @@ -180,7 +158,7 @@ line that isn't inside NumPy to see which package has the incompatibility. Note your NumPy version and the version of the incompatible package to help you find the best solution. -There can be various reason for the incompatibility: +There can be various reasons for the incompatibility: * You have recently upgraded NumPy, most likely to NumPy 2, and the other module now also needs to be upgraded. (NumPy 2 was released in June 2024.) diff --git a/environment.yml b/environment.yml index 91585a8dcb13..774d6c0209ac 100644 --- a/environment.yml +++ b/environment.yml @@ -7,7 +7,7 @@ name: numpy-dev channels: - conda-forge dependencies: - - python=3.12 # need to pin to avoid issues with builds + - python=3.12 # need to pin to avoid issues with builds - cython>=3.0 - compilers - openblas @@ -16,7 +16,7 @@ dependencies: - ninja - pkg-config - meson-python - - spin==0.13 + - spin==0.15 - ccache # For testing - pytest @@ -24,9 +24,8 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - typing_extensions>=4.5.0 - - mypy=1.15.0 - - orjson # makes mypy faster + - mypy=1.19.1 + - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 - sphinx-copybutton @@ -45,8 +44,9 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting - - ruff=0.11.9 + - cython-lint + - ruff=0.14.7 - gitpython # Used in some tests - cffi - - pytz + - tzdata diff --git a/meson.build b/meson.build index 0d436352cbbd..2cb7ce987ad5 100644 --- a/meson.build +++ b/meson.build @@ -1,12 +1,9 @@ project( 'NumPy', 'c', 'cpp', 'cython', - version: run_command( - # This should become `numpy/_version.py` in NumPy 2.0 - ['numpy/_build_utils/gitversion.py'], - check: true).stdout().strip(), - license: 'BSD-3', - meson_version: '>=1.5.2', # version in vendored-meson is 1.5.2 + version: run_command(['numpy/_build_utils/gitversion.py'], check: true).stdout().strip(), + license: 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0', + meson_version: '>=1.8.3', # version in vendored-meson default_options: [ 'buildtype=debugoptimized', 'b_ndebug=if-release', diff --git a/meson.options b/meson.options index b09992fe9b91..e7011a3b2f2e 100644 --- a/meson.options +++ b/meson.options @@ -28,12 +28,13 @@ option('disable-optimization', 
type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') option('cpu-baseline', type: 'string', value: 'min', description: 'Minimal set of required CPU features') -option('cpu-dispatch', type: 'string', value: 'max -xop -fma4', +option('cpu-baseline-detect', type: 'feature', value: 'auto', + description: 'Detect CPU baseline from the compiler flags') +option('cpu-dispatch', type: 'string', value: 'max', description: 'Dispatched set of additional CPU features') option('test-simd', type: 'array', value: [ - 'BASELINE', 'SSE2', 'SSE42', 'XOP', 'FMA4', - 'AVX2', 'FMA3', 'AVX2,FMA3', 'AVX512F', 'AVX512_SKX', + 'BASELINE', 'X86_V2', 'X86_V3', 'X86_V4', 'VSX', 'VSX2', 'VSX3', 'VSX4', 'NEON', 'ASIMD', 'VX', 'VXE', 'VXE2', diff --git a/meson_cpu/arm/meson.build b/meson_cpu/arm/meson.build index 7ffa3ef58ed0..92d241883795 100644 --- a/meson_cpu/arm/meson.build +++ b/meson_cpu/arm/meson.build @@ -2,21 +2,21 @@ source_root = meson.project_source_root() mod_features = import('features') NEON = mod_features.new( 'NEON', 1, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon.c')[0] ) NEON_FP16 = mod_features.new( 'NEON_FP16', 2, implies: NEON, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_fp16.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_fp16.c')[0] ) # FMA NEON_VFPV4 = mod_features.new( 'NEON_VFPV4', 3, implies: NEON_FP16, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_vfpv4.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c')[0] ) # Advanced SIMD ASIMD = mod_features.new( 'ASIMD', 4, implies: NEON_VFPV4, detect: {'val': 'ASIMD', 'match': 'NEON.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimd.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimd.c')[0] ) cpu_family = host_machine.cpu_family() if cpu_family == 'aarch64' @@ -37,29 +37,29 @@ endif ASIMDHP = mod_features.new( 'ASIMDHP', 5, implies: ASIMD, args: {'val': '-march=armv8.2-a+fp16', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdhp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdhp.c')[0] ) ## ARMv8.2 dot product ASIMDDP = mod_features.new( 'ASIMDDP', 6, implies: ASIMD, args: {'val': '-march=armv8.2-a+dotprod', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimddp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimddp.c')[0] ) ## ARMv8.2 Single & half-precision Multiply ASIMDFHM = mod_features.new( 'ASIMDFHM', 7, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+fp16fml', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdfhm.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdfhm.c')[0] ) ## Scalable Vector Extensions (SVE) SVE = mod_features.new( 'SVE', 8, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+sve', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_sve.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_sve.c')[0] ) # TODO: Add support for MSVC ARM_FEATURES = { 'NEON': NEON, 'NEON_FP16': NEON_FP16, 'NEON_VFPV4': NEON_VFPV4, - 'ASIMD': ASIMD, 'ASIMDHP': ASIMDHP, 'ASIMDFHM': ASIMDFHM, + 'ASIMD': ASIMD, 
'ASIMDHP': ASIMDHP, 'ASIMDDP': ASIMDDP, 'ASIMDFHM': ASIMDFHM, 'SVE': SVE } diff --git a/meson_cpu/loongarch64/meson.build b/meson_cpu/loongarch64/meson.build index 570e3bfcda01..d59b5682d646 100644 --- a/meson_cpu/loongarch64/meson.build +++ b/meson_cpu/loongarch64/meson.build @@ -3,6 +3,6 @@ mod_features = import('features') LSX = mod_features.new( 'LSX', 1, args: ['-mlsx'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_lsx.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_lsx.c')[0] ) LOONGARCH64_FEATURES = {'LSX': LSX} diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index e5b6d0fbe7be..02bbe5f7618e 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -46,20 +46,22 @@ if get_option('disable-optimization') CPU_CONF_BASELINE = 'none' CPU_CONF_DISPATCH = 'none' else - baseline_detect = false + baseline_detect = get_option('cpu-baseline-detect').enabled() c_args = get_option('c_args') - foreach arg : c_args - foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] - if arg.contains(carch) - message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') - baseline_detect = true + if get_option('cpu-baseline-detect').auto() + foreach arg : c_args + foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] + if arg.contains(carch) + message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') + baseline_detect = true + break + endif + endforeach + if baseline_detect break endif endforeach - if baseline_detect - break - endif - endforeach + endif # The required minimal set of required CPU features. CPU_CONF_BASELINE = get_option('cpu-baseline') if baseline_detect @@ -86,13 +88,16 @@ CPU_FEATURES += S390X_FEATURES CPU_FEATURES += RV64_FEATURES CPU_FEATURES += LOONGARCH64_FEATURES +CPU_FEATURES_REDIRECT = {} +CPU_FEATURES_REDIRECT += X86_REDIRECT + # Parse the requested baseline (CPU_CONF_BASELINE) and dispatch features # (CPU_CONF_DISPATCH). 
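The hunks that follow rework this token-parsing loop so that legacy feature names (e.g. ``SSE42`` or ``AVX512_SKX``) are routed through ``CPU_FEATURES_REDIRECT`` before validation. Since the Meson control flow is somewhat dense, here is a rough Python sketch of the same logic; the table entries are illustrative excerpts, and ``resolve_token`` is a name invented for this sketch:

    # Illustrative excerpts of the Meson-side tables, not the complete maps.
    CPU_FEATURES = {"X86_V2", "X86_V3", "X86_V4", "AVX512_ICL", "AVX512_SPR"}
    CPU_FEATURES_REDIRECT = {"SSE42": "X86_V2", "AVX2": "X86_V3", "AVX512_SKX": "X86_V4"}

    def resolve_token(tok: str, append: bool) -> str | None:
        """Return the feature name to accumulate, or None to skip the token."""
        if tok in CPU_FEATURES_REDIRECT:
            ntok = CPU_FEATURES_REDIRECT[tok]
            if not ntok:
                print(f"warning: CPU feature {tok!r} is no longer supported; ignoring")
                return None
            print(f"warning: CPU feature {tok!r} redirected to {ntok!r}")
            if not append:
                # Excluding a whole redirected group on behalf of one legacy
                # feature would be unsafe, so exclusions are dropped instead.
                return None
            tok = ntok
        if tok not in CPU_FEATURES:
            raise ValueError(f"invalid token {tok!r}")
        return tok

    print(resolve_token("SSE42", append=True))        # -> 'X86_V2'
    print(resolve_token("AVX512_SKX", append=False))  # -> None (exclusion dropped)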
 cpu_family = host_machine.cpu_family()
 # Used by build option 'min'
 min_features = {
-  'x86': [SSE2],
-  'x86_64': [SSE3],
+  'x86': [X86_V2],
+  'x86_64': [X86_V2],
   'ppc64': [],
   's390x': [],
   'arm': [],
@@ -189,15 +194,31 @@ foreach opt_name, conf : parse_options
       accumulate = min_features
     elif tok == 'MAX'
       accumulate = max_features
-    elif tok in CPU_FEATURES
-      tokobj = CPU_FEATURES[tok]
-      if tokobj not in max_features
-        ignored += tok
-        continue
-      endif
-      accumulate = [tokobj]
     else
-      error('Invalid token "'+tok+'" within option --'+opt_name)
+      if tok in CPU_FEATURES_REDIRECT
+        ntok = CPU_FEATURES_REDIRECT[tok]
+        if ntok == ''
+          warning('Ignoring CPU feature "@0@" in --@1@ option - feature is no longer supported.'.format(tok, opt_name))
+        else
+          warning('CPU Feature "@0@" is no longer explicitly supported, redirecting to "@1@".'.format(tok, ntok))
+        endif
+        warning('Please check the latest documentation for build options.')
+        if ntok == '' or not append # redirected features are not safe to exclude
+          continue
+        endif
+        tok = ntok
+      endif
+      if tok not in CPU_FEATURES
+        error('Invalid token "'+tok+'" within option --'+opt_name)
+      endif
+      if tok in CPU_FEATURES
+        tokobj = CPU_FEATURES[tok]
+        if tokobj not in max_features
+          ignored += tok
+          continue
+        endif
+        accumulate = [tokobj]
+      endif
     endif
     if append
       foreach fet : accumulate
@@ -207,8 +228,17 @@ foreach opt_name, conf : parse_options
       endforeach
     else
       filterd = []
+      # filter out the features that are in the accumulate list,
+      # including any successor features
      foreach fet : result
-        if fet not in accumulate
+        escape = false
+        foreach fet2 : accumulate
+          if fet2 in mod_features.implicit_c(fet)
+            escape = true
+            break
+          endif
+        endforeach
+        if not escape
           filterd += fet
         endif
       endforeach
diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build
index bad95257ca95..58690d1fa80a 100644
--- a/meson_cpu/ppc64/meson.build
+++ b/meson_cpu/ppc64/meson.build
@@ -3,19 +3,17 @@ mod_features = import('features')
 compiler_id = meson.get_compiler('c').get_id()
 
 VSX = mod_features.new(
-  'VSX', 1, args: '-mvsx',
-  test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx.c')[0],
+  'VSX', 1, args: ['-mvsx', '-DHWY_COMPILE_ONLY_STATIC', '-DHWY_DISABLE_ATTR'] +
+      (compiler_id == 'clang' ?
['-maltivec'] : []), + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx.c')[0], extra_tests: { - 'VSX_ASM': files(source_root + '/numpy/distutils/checks/extra_vsx_asm.c')[0] + 'VSX_ASM': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx_asm.c')[0] } ) -if compiler_id == 'clang' - VSX.update(args: ['-mvsx', '-maltivec']) -endif VSX2 = mod_features.new( 'VSX2', 2, implies: VSX, args: {'val': '-mcpu=power8', 'match': '.*vsx'}, detect: {'val': 'VSX2', 'match': 'VSX'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx2.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx2.c')[0], ) # VSX2 is hardware baseline feature on ppc64le since the first little-endian # support was part of Power8 @@ -23,19 +21,19 @@ if host_machine.endian() == 'little' VSX.update(implies: VSX2) endif VSX3 = mod_features.new( - 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*[mcpu=|vsx].*'}, + 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX3', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx3.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx3.c')[0], extra_tests: { - 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/distutils/checks/extra_vsx3_half_double.c')[0] + 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c')[0] } ) VSX4 = mod_features.new( - 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*[mcpu=|vsx].*'}, + 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX4', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx4.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx4.c')[0], extra_tests: { - 'VSX4_MMA': files(source_root + '/numpy/distutils/checks/extra_vsx4_mma.c')[0] + 'VSX4_MMA': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx4_mma.c')[0] } ) PPC64_FEATURES = {'VSX': VSX, 'VSX2': VSX2, 'VSX3': VSX3, 'VSX4': VSX4} diff --git a/meson_cpu/riscv64/meson.build b/meson_cpu/riscv64/meson.build index 3f930f39e27e..fdab67d246d6 100644 --- a/meson_cpu/riscv64/meson.build +++ b/meson_cpu/riscv64/meson.build @@ -3,6 +3,6 @@ mod_features = import('features') RVV = mod_features.new( 'RVV', 1, args: ['-march=rv64gcv'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_rvv.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_rvv.c')[0], ) RV64_FEATURES = {'RVV': RVV} diff --git a/meson_cpu/s390x/meson.build b/meson_cpu/s390x/meson.build index a69252d1607c..282ec056e78e 100644 --- a/meson_cpu/s390x/meson.build +++ b/meson_cpu/s390x/meson.build @@ -3,16 +3,16 @@ mod_features = import('features') VX = mod_features.new( 'VX', 1, args: ['-mzvector', '-march=arch11'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_vx.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vx.c')[0], ) VXE = mod_features.new( 'VXE', 2, implies: VX, args: {'val': '-march=arch12', 'match': '-march=.*'}, - detect: {'val': 'VXE', 'match': 'VX'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe.c')[0], + detect: {'val': 'VXE', 'match': '\\bvxe\\b'}, + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe.c')[0], ) VXE2 = mod_features.new( 'VXE2', 3, implies: VXE, args: {'val': '-march=arch13', 'match': '-march=.*'}, - detect: {'val': 'VXE2', 
'match': 'VX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe2.c')[0], + detect: {'val': 'VXE2', 'match': '\\bvxe2\\b'}, + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe2.c')[0], ) S390X_FEATURES = {'VX': VX, 'VXE': VXE, 'VXE2': VXE2} diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 8c7a0fb59a57..412803e5ddbb 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -1,228 +1,109 @@ source_root = meson.project_source_root() +current_dir = meson.current_source_dir() +cpu_family = host_machine.cpu_family() mod_features = import('features') -SSE = mod_features.new( - 'SSE', 1, args: '-msse', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse.c')[0] -) -SSE2 = mod_features.new( - 'SSE2', 2, implies: SSE, - args: '-msse2', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse2.c')[0] -) -# enabling SSE without SSE2 is useless also it's non-optional for x86_64 -SSE.update(implies: SSE2) -SSE3 = mod_features.new( - 'SSE3', 3, implies: SSE2, - args: '-msse3', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse3.c')[0] -) -SSSE3 = mod_features.new( - 'SSSE3', 4, implies: SSE3, - args: '-mssse3', - test_code: files(source_root + '/numpy/distutils/checks/cpu_ssse3.c')[0] -) -SSE41 = mod_features.new( - 'SSE41', 5, implies: SSSE3, - args: '-msse4.1', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse41.c')[0] -) -POPCNT = mod_features.new( - 'POPCNT', 6, implies: SSE41, - args: '-mpopcnt', - test_code: files(source_root + '/numpy/distutils/checks/cpu_popcnt.c')[0] -) -SSE42 = mod_features.new( - 'SSE42', 7, implies: POPCNT, args: '-msse4.2', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse42.c')[0] -) -# 7-20 left as margin for any extra features -AVX = mod_features.new( - 'AVX', 20, implies: SSE42, args: '-mavx', - detect: {'val': 'AVX', 'match': '.*SSE.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx.c')[0] -) -XOP = mod_features.new( - 'XOP', 21, implies: AVX, args: '-mxop', - test_code: files(source_root + '/numpy/distutils/checks/cpu_xop.c')[0] -) -FMA4 = mod_features.new( - 'FMA4', 22, implies: AVX, args: '-mfma4', - test_code: files(source_root + '/numpy/distutils/checks/cpu_fma4.c')[0] -) -# x86 half-precision -F16C = mod_features.new( - 'F16C', 23, implies: AVX, args: '-mf16c', - test_code: files(source_root + '/numpy/distutils/checks/cpu_f16c.c')[0] -) -FMA3 = mod_features.new( - 'FMA3', 24, implies: F16C, args: '-mfma', - test_code: files(source_root + '/numpy/distutils/checks/cpu_fma3.c')[0] -) -# match this to HWY_AVX2 -AVX2 = mod_features.new( - 'AVX2', 25, implies: FMA3, args: ['-mavx2', '-maes', '-mpclmul', '-mbmi', '-mbmi2'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx2.c')[0] -) -# 25-40 left as margin for any extra features -AVX512F = mod_features.new( - 'AVX512F', 40, implies: [AVX2], - # Disables mmx because of stack corruption that may happen during mask - # conversions. 
- # TODO (seiko2plus): provide more clarification - args: ['-mno-mmx', '-mavx512f'], - detect: {'val': 'AVX512F', 'match': '.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512f.c')[0], - extra_tests: { - 'AVX512F_REDUCE': files(source_root + '/numpy/distutils/checks/extra_avx512f_reduce.c')[0] - } -) -AVX512CD = mod_features.new( - 'AVX512CD', 41, implies: AVX512F, args: '-mavx512cd', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512cd.c')[0] -) -AVX512_KNL = mod_features.new( - 'AVX512_KNL', 42, implies: AVX512CD, args: ['-mavx512er', '-mavx512pf'], - group: ['AVX512ER', 'AVX512PF'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knl.c')[0] -) -AVX512_KNM = mod_features.new( - 'AVX512_KNM', 43, implies: AVX512_KNL, - args: ['-mavx5124fmaps', '-mavx5124vnniw', '-mavx512vpopcntdq'], - group: ['AVX5124FMAPS', 'AVX5124VNNIW', 'AVX512VPOPCNTDQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knm.c')[0] -) -AVX512_SKX = mod_features.new( - 'AVX512_SKX', 50, implies: AVX512CD, - args: ['-mavx512vl', '-mavx512bw', '-mavx512dq'], - group: ['AVX512VL', 'AVX512BW', 'AVX512DQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_skx.c')[0], - extra_tests: { - 'AVX512BW_MASK': files(source_root + '/numpy/distutils/checks/extra_avx512bw_mask.c')[0], - 'AVX512DQ_MASK': files(source_root + '/numpy/distutils/checks/extra_avx512dq_mask.c')[0] - } -) -AVX512_CLX = mod_features.new( - 'AVX512_CLX', 51, implies: AVX512_SKX, args: '-mavx512vnni', - group: ['AVX512VNNI'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_clx.c')[0] -) -AVX512_CNL = mod_features.new( - 'AVX512_CNL', 52, implies: AVX512_SKX, - args: ['-mavx512ifma', '-mavx512vbmi'], - group: ['AVX512IFMA', 'AVX512VBMI'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_cnl.c')[0] -) +HWY_SSE4_FLAGS = ['-DHWY_WANT_SSE4', '-DHWY_DISABLE_PCLMUL_AES'] +# Use SSE for floating-point on x86-32 to ensure numeric consistency. +# The x87 FPU's 80-bit internal precision causes unpredictable rounding +# and overflow behavior when converting to smaller types. SSE maintains +# strict 32/64-bit precision throughout all calculations. +X86_64_V2_FLAGS = cpu_family == 'x86'? ['-mfpmath=sse'] : ['-mcx16'] +X86_64_V2_NAMES = cpu_family == 'x86'? 
[] : ['CX16'] +X86_V2 = mod_features.new( + 'X86_V2', 1, args: ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', + '-mpopcnt', '-msahf'] + X86_64_V2_FLAGS + HWY_SSE4_FLAGS, + # Adds compiler definitions `NPY_HAVE_SSE*` + group: ['SSE', 'SSE2', 'SSE3', 'SSSE3', 'SSE41', 'SSE42', 'POPCNT', 'LAHF'] + X86_64_V2_NAMES, + detect: 'X86_V2', + test_code: files(current_dir + '/test_x86_v2.c')[0], +) +X86_V3 = mod_features.new( + 'X86_V3', 10, implies: X86_V2, + args: ['-mavx', '-mavx2', '-mfma', '-mbmi', '-mbmi2', '-mlzcnt', '-mf16c', '-mmovbe'], + group: ['AVX', 'AVX2', 'FMA3', 'BMI', 'BMI2', 'LZCNT', 'F16C', 'MOVBE'], + detect: 'X86_V3', + test_code: files(current_dir + '/test_x86_v3.c')[0], +) +X86_V4 = mod_features.new( + 'X86_V4', 20, implies: X86_V3, + args: ['-mavx512f', '-mavx512cd', '-mavx512vl', '-mavx512bw', '-mavx512dq'], + group: ['AVX512F', 'AVX512CD', 'AVX512VL', 'AVX512BW', 'AVX512DQ', 'AVX512_SKX', + 'AVX512F_REDUCE', 'AVX512BW_MASK', 'AVX512DQ_MASK'], + detect: 'X86_V4', + test_code: files(current_dir + '/test_x86_v4.c')[0], +) +if cpu_family == 'x86' + X86_V4.update(disable: 'not supported on x86-32') +endif AVX512_ICL = mod_features.new( - 'AVX512_ICL', 53, implies: [AVX512_CLX, AVX512_CNL], - args: ['-mavx512vbmi2', '-mavx512bitalg', '-mavx512vpopcntdq'], - group: ['AVX512VBMI2', 'AVX512BITALG', 'AVX512VPOPCNTDQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_icl.c')[0] + 'AVX512_ICL', 30, implies: X86_V4, + args: ['-mavx512vbmi', '-mavx512vbmi2', '-mavx512vnni', '-mavx512bitalg', + '-mavx512vpopcntdq', '-mavx512ifma', '-mvaes', '-mgfni', '-mvpclmulqdq'], + group: ['AVX512VBMI', 'AVX512VBMI2', 'AVX512VNNI', 'AVX512BITALG', 'AVX512VPOPCNTDQ', + 'AVX512IFMA', 'VAES', 'GFNI', 'VPCLMULQDQ'], + detect: 'AVX512_ICL', + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_avx512_icl.c')[0] ) -# TODO add support for zen4 AVX512_SPR = mod_features.new( - 'AVX512_SPR', 55, implies: AVX512_ICL, - args: ['-mavx512fp16'], - group: ['AVX512FP16'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_spr.c')[0] + 'AVX512_SPR', 35, implies: AVX512_ICL, + args: ['-mavx512fp16', '-mavx512bf16'], + group: ['AVX512FP16', 'AVX512BF16'], + detect: 'AVX512_SPR', + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_avx512_spr.c')[0] ) # Specializations for non unix-like compilers # ------------------------------------------- -cpu_family = host_machine.cpu_family() -compiler_id = meson.get_compiler('c').get_id() +cc = meson.get_compiler('c') +compiler_id = cc.get_id() if compiler_id not in ['gcc', 'clang'] AVX512_SPR.update(disable: compiler_id + ' compiler does not support it') endif -# Common specializations between both Intel compilers (unix-like and msvc-like) -if compiler_id in ['intel', 'intel-cl'] - # POPCNT, and F16C don't own private FLAGS however the compiler still - # provides ISA capability for them. 
- POPCNT.update(args: '') - F16C.update(args: '') - # Intel compilers don't support the following features independently - FMA3.update(implies: [F16C, AVX2]) - AVX2.update(implies: [F16C, FMA3]) - AVX512F.update(implies: [AVX2, AVX512CD]) - AVX512CD.update(implies: [AVX512F]) - XOP.update(disable: 'Intel Compiler does not support it') - FMA4.update(disable: 'Intel Compiler does not support it') -endif - if compiler_id == 'intel-cl' - foreach fet : [SSE, SSE2, SSE3, SSSE3, AVX] - fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': '/arch:.*'}) - endforeach - SSE41.update(args: {'val': '/arch:SSE4.1', 'match': '/arch:.*'}) - SSE42.update(args: {'val': '/arch:SSE4.2', 'match': '/arch:.*'}) - FMA3.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) - AVX2.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) - AVX512F.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) - AVX512CD.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) - AVX512_KNL.update(args: {'val': '/Qx:KNL', 'match': '/[arch|Qx]:.*'}) - AVX512_KNM.update(args: {'val': '/Qx:KNM', 'match': '/[arch|Qx]:.*'}) - AVX512_SKX.update(args: {'val': '/Qx:SKYLAKE-AVX512', 'match': '/[arch|Qx]:.*'}) - AVX512_CLX.update(args: {'val': '/Qx:CASCADELAKE', 'match': '/[arch|Qx]:.*'}) - AVX512_CNL.update(args: {'val': '/Qx:CANNONLAKE', 'match': '/[arch|Qx]:.*'}) + X86_V2.update(args: [{'val': '/arch:SSE4.2', 'match': '/arch:.*'}] + HWY_SSE4_FLAGS) + X86_V3.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) + X86_V4.update(args: {'val': '/Qx:SKYLAKE-AVX512', 'match': '/[arch|Qx]:.*'}) AVX512_ICL.update(args: {'val': '/Qx:ICELAKE-CLIENT', 'match': '/[arch|Qx]:.*'}) endif if compiler_id == 'intel' - clear_m = '^(-mcpu=|-march=)' clear_any = '^(-mcpu=|-march=|-x[A-Z0-9\-])' - FMA3.update(args: {'val': '-xCORE-AVX2', 'match': clear_m}) - AVX2.update(args: {'val': '-xCORE-AVX2', 'match': clear_m}) - AVX512F.update(args: {'val': '-xCOMMON-AVX512', 'match': clear_m}) - AVX512CD.update(args: {'val': '-xCOMMON-AVX512', 'match': clear_m}) - AVX512_KNL.update(args: {'val': '-xKNL', 'match': clear_any}) - AVX512_KNM.update(args: {'val': '-xKNM', 'match': clear_any}) - AVX512_SKX.update(args: {'val': '-xSKYLAKE-AVX512', 'match': clear_any}) - AVX512_CLX.update(args: {'val': '-xCASCADELAKE', 'match': clear_any}) - AVX512_CNL.update(args: {'val': '-xCANNONLAKE', 'match': clear_any}) + X86_V2.update(args: [{'val': '-xSSE4.2', 'match': clear_any}] + HWY_SSE4_FLAGS) + X86_V3.update(args: {'val': '-xCORE-AVX2', 'match': clear_any}) + X86_V4.update(args: {'val': '-xSKYLAKE-AVX512', 'match': clear_any}) AVX512_ICL.update(args: {'val': '-xICELAKE-CLIENT', 'match': clear_any}) endif if compiler_id == 'msvc' - # MSVC compiler doesn't support the following features - foreach fet : [AVX512_KNL, AVX512_KNM] - fet.update(disable: compiler_id + ' compiler does not support it') - endforeach - # The following features don't own private FLAGS, however the compiler still - # provides ISA capability for them. - foreach fet : [ - SSE3, SSSE3, SSE41, POPCNT, SSE42, AVX, F16C, XOP, FMA4, - AVX512F, AVX512CD, AVX512_CLX, AVX512_CNL, - AVX512_ICL - ] - fet.update(args: '') - endforeach - # MSVC compiler doesn't support the following features independently - FMA3.update(implies: [F16C, AVX2]) - AVX2.update(implies: [F16C, FMA3]) - AVX512F.update(implies: [AVX2, AVX512CD, AVX512_SKX]) - AVX512CD.update(implies: [AVX512F, AVX512_SKX]) + cc_ver = cc.version() + MSVC_SSE4 = cc_ver.version_compare('>=19.40') ? 
['/arch:SSE4.2'] : []
+  # 32-bit MSVC does not support /arch:SSE4.2
+  MSVC_SSE4 = cpu_family == 'x86' ? ['/arch:SSE2'] : MSVC_SSE4
+  MSVC_SSE4 = cc_ver.version_compare('>=19.30') ? MSVC_SSE4 + ['/fp:contract'] : MSVC_SSE4
+  X86_V2.update(args: MSVC_SSE4 + HWY_SSE4_FLAGS)
   clear_arch = '/arch:.*'
-  # only available on 32-bit. Its enabled by default on 64-bit mode
-  foreach fet : [SSE, SSE2]
-    if cpu_family == 'x86'
-      fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': clear_arch})
-    else
-      fet.update(args: '')
-    endif
-  endforeach
-  FMA3.update(args: {'val': '/arch:AVX2', 'match': clear_arch})
-  AVX2.update(args: {'val': '/arch:AVX2', 'match': clear_arch})
-  AVX512_SKX.update(args: {'val': '/arch:AVX512', 'match': clear_arch})
+  X86_V3.update(args: {'val': '/arch:AVX2', 'match': clear_arch})
+  # FIXME: After completing the transition from universal intrinsics to Highway,
+  # investigate which MSVC versions are incompatible with Highway's AVX-512 implementation.
+  X86_V4.update(disable: 'Considered broken by Highway on MSVC')
+  # To force-enable AVX-512, use:
+  # X86_V4.update(args: [{'val': '/arch:AVX512', 'match': clear_arch}, '-DHWY_BROKEN_MSVC=0'])
+  AVX512_ICL.update(disable: 'unsupported by Highway on MSVC')
 endif
 
+# legacy CPU features
+X86_REDIRECT = {
+  'SSE': 'X86_V2', 'SSE2': 'X86_V2', 'SSE3': 'X86_V2', 'SSSE3': 'X86_V2',
+  'SSE41': 'X86_V2', 'SSE42': 'X86_V2', 'XOP': 'X86_V2', 'FMA4': 'X86_V2',
+  'FMA3': 'X86_V3', 'AVX': 'X86_V3', 'F16C': 'X86_V3',
+  'AVX512F': 'X86_V3', 'AVX512CD': 'X86_V3',
+  'AVX512_KNL': 'X86_V3', 'AVX512_KNM': 'X86_V3',
+  'AVX512_SKX': 'X86_V4', 'AVX512_CLX': 'X86_V4', 'AVX512_CNL': 'X86_V4',
+}
+
 X86_FEATURES = {
-  'SSE': SSE, 'SSE2': SSE2, 'SSE3': SSE3, 'SSSE3': SSSE3,
-  'SSE41': SSE41, 'POPCNT': POPCNT, 'SSE42': SSE42, 'AVX': AVX,
-  'XOP': XOP, 'FMA4': FMA4, 'F16C': F16C, 'FMA3': FMA3,
-  'AVX2': AVX2, 'AVX512F': AVX512F, 'AVX512CD': AVX512CD,
-  'AVX512_KNL': AVX512_KNL, 'AVX512_KNM': AVX512_KNM,
-  'AVX512_SKX': AVX512_SKX, 'AVX512_CLX': AVX512_CLX,
-  'AVX512_CNL': AVX512_CNL, 'AVX512_ICL': AVX512_ICL,
-  'AVX512_SPR': AVX512_SPR
+  'X86_V2': X86_V2, 'X86_V3': X86_V3, 'X86_V4': X86_V4,
+  'AVX512_ICL': AVX512_ICL, 'AVX512_SPR': AVX512_SPR
 }
diff --git a/meson_cpu/x86/test_x86_v2.c b/meson_cpu/x86/test_x86_v2.c
new file mode 100644
index 000000000000..f897957224d5
--- /dev/null
+++ b/meson_cpu/x86/test_x86_v2.c
@@ -0,0 +1,69 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and Clang, the Intel compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test the #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through the env var `CFLAGS`; otherwise
+     * the test breaks and ends up enabling all possible features.
+     */
+    #if !defined(__SSE__) || !defined(__SSE2__) || !defined(__SSE3__) || \
+        !defined(__SSSE3__) || !defined(__SSE4_1__) || !defined(__SSE4_2__) || !defined(__POPCNT__)
+        #error HOST/ARCH does not support x86_v2
+    #endif
+#endif
+
+#include <xmmintrin.h>  // SSE
+#include <emmintrin.h>  // SSE2
+#include <pmmintrin.h>  // SSE3
+#include <tmmintrin.h>  // SSSE3
+#include <smmintrin.h>  // SSE4.1
+#ifdef _MSC_VER
+    #include <nmmintrin.h>  // SSE4.2 and POPCNT for MSVC
+#else
+    #include <nmmintrin.h>     // SSE4.2
+    #include <popcntintrin.h>  // POPCNT
+#endif
+
+int main(int argc, char **argv)
+{
+    // to prevent optimization
+    int seed = (int)argv[argc-1][0];
+    volatile int result = 0;
+
+    // SSE test
+    __m128 a = _mm_set1_ps((float)seed);
+    __m128 b = _mm_set1_ps(2.0f);
+    __m128 c = _mm_add_ps(a, b);
+    result += (int)_mm_cvtss_f32(c);
+
+    // SSE2 test
+    __m128i ai = _mm_set1_epi32(seed);
+    __m128i bi = _mm_set1_epi32(2);
+    __m128i ci = _mm_add_epi32(ai, bi);
+    result += _mm_cvtsi128_si32(ci);
+
+    // SSE3 test
+    __m128 d = _mm_movehdup_ps(a);
+    result += (int)_mm_cvtss_f32(d);
+
+    // SSSE3 test
+    __m128i di = _mm_abs_epi16(_mm_set1_epi16((short)seed));
+    result += _mm_cvtsi128_si32(di);
+
+    // SSE4.1 test
+    __m128i ei = _mm_max_epi32(ai, bi);
+    result += _mm_cvtsi128_si32(ei);
+
+    // SSE4.2 test
+    __m128i str1 = _mm_set1_epi8((char)seed);
+    __m128i str2 = _mm_set1_epi8((char)(seed + 1));
+    int res4_2 = _mm_cmpestra(str1, 4, str2, 4, 0);
+    result += res4_2;
+
+    // POPCNT test
+    unsigned int test_val = (unsigned int)seed | 0x01234567;
+    int pcnt = _mm_popcnt_u32(test_val);
+    result += pcnt;
+
+    return result;
+}
diff --git a/meson_cpu/x86/test_x86_v3.c b/meson_cpu/x86/test_x86_v3.c
new file mode 100644
index 000000000000..0bc496a93ad0
--- /dev/null
+++ b/meson_cpu/x86/test_x86_v3.c
@@ -0,0 +1,66 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and Clang, the Intel compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test the #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through the env var `CFLAGS`; otherwise
+     * the test breaks and ends up enabling all possible features.
+     */
+    #if !defined(__AVX__) || !defined(__AVX2__) || !defined(__FMA__) || \
+        !defined(__BMI__) || !defined(__BMI2__) || !defined(__LZCNT__) || !defined(__F16C__)
+        #error HOST/ARCH does not support x86_v3
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    // to prevent optimization
+    int seed = (int)argv[argc-1][0];
+    volatile int result = 0;
+
+    // AVX test
+    __m256 avx_a = _mm256_set1_ps((float)seed);
+    __m256 avx_b = _mm256_set1_ps(2.0f);
+    __m256 avx_c = _mm256_add_ps(avx_a, avx_b);
+    float avx_result = _mm256_cvtss_f32(avx_c);
+    result += (int)avx_result;
+
+    // AVX2 test
+    __m256i avx2_a = _mm256_set1_epi32(seed);
+    __m256i avx2_b = _mm256_set1_epi32(2);
+    __m256i avx2_c = _mm256_add_epi32(avx2_a, avx2_b);
+    result += _mm256_extract_epi32(avx2_c, 0);
+
+    // FMA test
+    __m256 fma_a = _mm256_set1_ps((float)seed);
+    __m256 fma_b = _mm256_set1_ps(2.0f);
+    __m256 fma_c = _mm256_set1_ps(3.0f);
+    __m256 fma_result = _mm256_fmadd_ps(fma_a, fma_b, fma_c);
+    result += (int)_mm256_cvtss_f32(fma_result);
+
+    // BMI1 tests
+    unsigned int bmi1_src = (unsigned int)seed;
+    unsigned int tzcnt_result = _tzcnt_u32(bmi1_src);
+    result += tzcnt_result;
+
+    // BMI2 tests
+    unsigned int bzhi_result = _bzhi_u32(bmi1_src, 17);
+    result += (int)bzhi_result;
+
+    unsigned int pdep_result = _pdep_u32(bmi1_src, 0x10101010);
+    result += pdep_result;
+
+    // LZCNT test
+    unsigned int lzcnt_result = _lzcnt_u32(bmi1_src);
+    result += lzcnt_result;
+
+    // F16C tests
+    __m128 f16c_src = _mm_set1_ps((float)seed);
+    __m128i f16c_half = _mm_cvtps_ph(f16c_src, 0);
+    __m128 f16c_restored = _mm_cvtph_ps(f16c_half);
+    result += (int)_mm_cvtss_f32(f16c_restored);
+
+    return result;
+}
diff --git a/meson_cpu/x86/test_x86_v4.c b/meson_cpu/x86/test_x86_v4.c
new file mode 100644
index 000000000000..d49c3a78e3b3
--- /dev/null
+++ b/meson_cpu/x86/test_x86_v4.c
@@ -0,0 +1,88 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and Clang, the Intel compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test the #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through the env var `CFLAGS`; otherwise
+     * the test breaks and ends up enabling all possible features.
+     */
+    #if !defined(__AVX512F__) || !defined(__AVX512CD__) || !defined(__AVX512VL__) || \
+        !defined(__AVX512BW__) || !defined(__AVX512DQ__)
+        #error HOST/ARCH does not support x86_v4
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    // to prevent optimization
+    int seed = (int)argv[argc-1][0];
+    volatile int result = 0;
+
+    // AVX512F tests (Foundation)
+    __m512 avx512f_a = _mm512_set1_ps((float)seed);
+    __m512 avx512f_b = _mm512_set1_ps(2.0f);
+    __m512 avx512f_c = _mm512_add_ps(avx512f_a, avx512f_b);
+    float avx512f_result = _mm512_cvtss_f32(avx512f_c);
+    result += (int)avx512f_result;
+
+    // Test AVX512F mask operations
+    __mmask16 k1 = _mm512_cmpeq_ps_mask(avx512f_a, avx512f_b);
+    __m512 masked_result = _mm512_mask_add_ps(avx512f_a, k1, avx512f_b, avx512f_c);
+    result += _mm512_mask2int(k1);
+
+    // AVX512CD tests (Conflict Detection)
+    __m512i avx512cd_a = _mm512_set1_epi32(seed);
+    __m512i avx512cd_b = _mm512_conflict_epi32(avx512cd_a);
+    result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512cd_b, 0));
+
+    __m512i avx512cd_lzcnt = _mm512_lzcnt_epi32(avx512cd_a);
+    result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512cd_lzcnt, 0));
+
+    // AVX512VL tests (Vector Length Extensions - 128/256-bit vectors with AVX512 features)
+    __m256 avx512vl_a = _mm256_set1_ps((float)seed);
+    __m256 avx512vl_b = _mm256_set1_ps(2.0f);
+    __mmask8 k2 = _mm256_cmp_ps_mask(avx512vl_a, avx512vl_b, _CMP_EQ_OQ);
+    __m256 avx512vl_c = _mm256_mask_add_ps(avx512vl_a, k2, avx512vl_a, avx512vl_b);
+    result += (int)_mm256_cvtss_f32(avx512vl_c);
+
+    __m128 avx512vl_sm_a = _mm_set1_ps((float)seed);
+    __m128 avx512vl_sm_b = _mm_set1_ps(2.0f);
+    __mmask8 k3 = _mm_cmp_ps_mask(avx512vl_sm_a, avx512vl_sm_b, _CMP_EQ_OQ);
+    __m128 avx512vl_sm_c = _mm_mask_add_ps(avx512vl_sm_a, k3, avx512vl_sm_a, avx512vl_sm_b);
+    result += (int)_mm_cvtss_f32(avx512vl_sm_c);
+
+    // AVX512BW tests (Byte and Word)
+    __m512i avx512bw_a = _mm512_set1_epi16((short)seed);
+    __m512i avx512bw_b = _mm512_set1_epi16(2);
+    __mmask32 k4 = _mm512_cmpeq_epi16_mask(avx512bw_a, avx512bw_b);
+    __m512i avx512bw_c = _mm512_mask_add_epi16(avx512bw_a, k4, avx512bw_a, avx512bw_b);
+    result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512bw_c, 0));
+
+    // Test byte operations
+    __m512i avx512bw_bytes_a = _mm512_set1_epi8((char)seed);
+    __m512i avx512bw_bytes_b = _mm512_set1_epi8(2);
+    __mmask64 k5 = _mm512_cmpeq_epi8_mask(avx512bw_bytes_a, avx512bw_bytes_b);
+    result += (k5 & 1);
+
+    // AVX512DQ tests (Doubleword and Quadword)
+    __m512d avx512dq_a = _mm512_set1_pd((double)seed);
+    __m512d avx512dq_b = _mm512_set1_pd(2.0);
+    __mmask8 k6 = _mm512_cmpeq_pd_mask(avx512dq_a, avx512dq_b);
+    __m512d avx512dq_c = _mm512_mask_add_pd(avx512dq_a, k6, avx512dq_a, avx512dq_b);
+    double avx512dq_result = _mm512_cvtsd_f64(avx512dq_c);
+    result += (int)avx512dq_result;
+
+    // Test integer to/from floating point conversion
+    __m512i avx512dq_back = _mm512_cvtps_epi32(masked_result);
+    result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512dq_back, 0));
+
+    // Test 64-bit integer operations
+    __m512i avx512dq_i64_a = _mm512_set1_epi64(seed);
+    __m512i avx512dq_i64_b = _mm512_set1_epi64(2);
+    __m512i avx512dq_i64_c = _mm512_add_epi64(avx512dq_i64_a, avx512dq_i64_b);
+    result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512dq_i64_c, 0));
+
+    return result;
+}
diff --git a/numpy/__config__.pyi b/numpy/__config__.pyi
index b59bdcd252b6..21e8b01fdd96 100644
--- a/numpy/__config__.pyi
+++ b/numpy/__config__.pyi
@@ -1,7 +1,13 @@
 from enum import
Enum from types import ModuleType -from typing import Final, NotRequired, TypedDict, overload, type_check_only -from typing import Literal as L +from typing import ( + Final, + Literal as L, + NotRequired, + TypedDict, + overload, + type_check_only, +) _CompilerConfigDictValue = TypedDict( "_CompilerConfigDictValue", diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 86c91cf617a5..c71898626070 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -156,6 +156,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SAFE_CASTING NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING ctypedef enum NPY_CLIPMODE: NPY_CLIP diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index eb0764126116..40a24b6c7cc1 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -165,6 +165,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SAFE_CASTING NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING ctypedef enum NPY_CLIPMODE: NPY_CLIP diff --git a/numpy/__init__.py b/numpy/__init__.py index 8fb2e742dfc4..5012decc43ab 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -52,9 +52,6 @@ Polynomial tools testing NumPy testing tools -distutils - Enhancements to distutils with support for - Fortran compilers support and more (for Python <= 3.11) Utilities --------- @@ -111,10 +108,13 @@ try: from numpy.__config__ import show_config except ImportError as e: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python interpreter from there.""" - raise ImportError(msg) from e + if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__": + # The __config__ module itself was not found, so add this info: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + raise from . import _core from ._core import ( @@ -451,13 +451,11 @@ pass del ta - from . import lib - from . import matrixlib as _mat + from . import lib, matrixlib as _mat from .lib import scimath as emath from .lib._arraypad_impl import pad from .lib._arraysetops_impl import ( ediff1d, - in1d, intersect1d, isin, setdiff1d, @@ -504,7 +502,6 @@ sinc, sort_complex, trapezoid, - trapz, trim_zeros, unwrap, vectorize, @@ -624,8 +621,8 @@ from .matrixlib import asmatrix, bmat, matrix # public submodules are imported lazily, therefore are accessible from - # __getattr__. Note that `distutils` (deprecated) and `array_api` - # (experimental label) are not added here, because `from numpy import *` + # __getattr__. Note that `array_api` + # (experimental label) is not added here, because `from numpy import *` # must not raise any warnings - that's too disruptive. 
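The ``__numpy_submodules__`` set that opens the next hunk feeds NumPy's lazy submodule loading: names in it are imported only on first attribute access, via the module-level ``__getattr__`` seen further down in this file. As a generic sketch of that PEP 562 pattern (a toy package, not NumPy's actual implementation):

    # mypackage/__init__.py -- a toy illustration of PEP 562 lazy imports
    import importlib

    _LAZY_SUBMODULES = {"linalg", "fft", "random"}

    def __getattr__(attr):
        # Called only when normal attribute lookup fails, so plain
        # `import mypackage` stays cheap, and `from mypackage import *`
        # never touches (or warns about) the lazy submodules.
        if attr in _LAZY_SUBMODULES:
            return importlib.import_module(f"{__name__}.{attr}")
        raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")

    def __dir__():
        # Advertise the lazy submodules to dir() and tab completion.
        return sorted(set(globals()) | _LAZY_SUBMODULES)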
__numpy_submodules__ = { "linalg", "fft", "dtypes", "random", "polynomial", "ma", @@ -675,9 +672,6 @@ from ._array_api_info import __array_namespace_info__ - # now that numpy core module is imported, can initialize limits - _core.getlimits._register_known_types() - __all__ = list( __numpy_submodules__ | set(_core.__all__) | @@ -750,23 +744,12 @@ def __getattr__(attr): elif attr == "char": import numpy.char as char return char - elif attr == "array_api": - raise AttributeError("`numpy.array_api` is not available from " - "numpy 2.0 onwards", name=None) elif attr == "core": import numpy.core as core return core elif attr == "strings": import numpy.strings as strings return strings - elif attr == "distutils": - if 'distutils' in __numpy_submodules__: - import numpy.distutils as distutils - return distutils - else: - raise AttributeError("`numpy.distutils` is not available from " - "Python 3.12 onwards", name=None) - if attr in __future_scalars__: # And future warnings for those that will change, but also give # the AttributeError @@ -800,7 +783,7 @@ def __dir__(): ) public_symbols -= { "matrixlib", "matlib", "tests", "conftest", "version", - "distutils", "array_api" + "array_api" } return list(public_symbols) @@ -873,6 +856,23 @@ def _mac_os_check(): del w del _mac_os_check + def blas_fpe_check(): + # Check if BLAS adds spurious FPEs, mostly seen on M4 arms with Accelerate. + with errstate(all='raise'): + x = ones((20, 20)) + try: + x @ x + except FloatingPointError: + res = _core._multiarray_umath._blas_supports_fpe(False) + if res: # res was not modified (hardcoded to True for now) + warnings.warn( + "Spurious warnings given by blas but suppression not " + "set up on this platform. Please open a NumPy issue.", + UserWarning, stacklevel=2) + + blas_fpe_check() + del blas_fpe_check + def hugepage_setup(): """ We usually use madvise hugepages support, but on some old kernels it diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0359e605a1c3..4128dd3cb9ec 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,22 +1,20 @@ # ruff: noqa: I001 -import builtins -import sys -import mmap import ctypes as ct -import array as _array import datetime as dt +import inspect +import sys from abc import abstractmethod -from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from builtins import bool as py_bool from decimal import Decimal from fractions import Fraction +from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from uuid import UUID -import numpy as np from numpy.__config__ import show as show_config from numpy._pytesttester import PytestTester from numpy._core._internal import _ctypes -from numpy._typing import ( +from numpy._typing import ( # type: ignore[deprecated] # Arrays ArrayLike, NDArray, @@ -89,24 +87,15 @@ from numpy._typing import ( _Float64Codes, _Complex64Codes, _Complex128Codes, - _ByteCodes, - _ShortCodes, _IntCCodes, _IntPCodes, _LongCodes, _LongLongCodes, - _UByteCodes, - _UShortCodes, _UIntCCodes, _UIntPCodes, _ULongCodes, _ULongLongCodes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, _LongDoubleCodes, - _CSingleCodes, - _CDoubleCodes, _CLongDoubleCodes, _DT64Codes, _TD64Codes, @@ -121,10 +110,7 @@ from numpy._typing import ( _FloatingCodes, _ComplexFloatingCodes, _InexactCodes, - _NumberCodes, _CharacterCodes, - _FlexibleCodes, - _GenericCodes, # Ufuncs _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1, @@ -133,32 +119,6 @@ from numpy._typing import ( _GUFunc_Nin2_Nout1, ) -from numpy._typing._callable 
import ( - _BoolOp, - _BoolBitOp, - _BoolSub, - _BoolTrueDiv, - _BoolMod, - _BoolDivMod, - _IntTrueDiv, - _UnsignedIntOp, - _UnsignedIntBitOp, - _UnsignedIntMod, - _UnsignedIntDivMod, - _SignedIntOp, - _SignedIntBitOp, - _SignedIntMod, - _SignedIntDivMod, - _FloatOp, - _FloatMod, - _FloatDivMod, - _NumberOp, - _ComparisonOpLT, - _ComparisonOpLE, - _ComparisonOpGT, - _ComparisonOpGE, -) - # NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform from numpy._typing._extended_precision import ( float96, @@ -170,6 +130,7 @@ from numpy._typing._extended_precision import ( from numpy._array_api_info import __array_namespace_info__ from collections.abc import ( + Buffer, Callable, Iterable, Iterator, @@ -177,19 +138,6 @@ from collections.abc import ( Sequence, ) -if sys.version_info >= (3, 12): - from collections.abc import Buffer as _SupportsBuffer -else: - _SupportsBuffer: TypeAlias = ( - bytes - | bytearray - | memoryview - | _array.array[Any] - | mmap.mmap - | NDArray[Any] - | generic - ) - from typing import ( Any, ClassVar, @@ -205,10 +153,10 @@ from typing import ( SupportsFloat, SupportsInt, SupportsIndex, - TypeAlias, TypedDict, final, overload, + override, type_check_only, ) @@ -217,7 +165,7 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, TypeVar +from typing_extensions import CapsuleType, TypeVar, deprecated from numpy import ( char, @@ -245,8 +193,6 @@ from numpy import ( matrixlib as matrixlib, version as version, ) -if sys.version_info < (3, 12): - from numpy import distutils as distutils from numpy._core.records import ( record, @@ -321,8 +267,7 @@ from numpy._core._ufunc_config import ( getbufsize, seterrcall, geterrcall, - _ErrKind, - _ErrCall, + errstate, ) from numpy._core.arrayprint import ( @@ -340,6 +285,10 @@ from numpy._core.einsumfunc import ( einsum, einsum_path, ) +from numpy._core.getlimits import ( + finfo, + iinfo, +) from numpy._core.multiarray import ( array, @@ -432,6 +381,8 @@ from numpy._core.shape_base import ( ) from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue, _NoValueType from numpy.lib import ( scimath as emath, @@ -443,7 +394,6 @@ from numpy.lib._arraypad_impl import ( from numpy.lib._arraysetops_impl import ( ediff1d, - in1d, intersect1d, isin, setdiff1d, @@ -485,7 +435,6 @@ from numpy.lib._function_base_impl import ( blackman, kaiser, trapezoid, - trapz, i0, meshgrid, delete, @@ -493,10 +442,9 @@ from numpy.lib._function_base_impl import ( append, interp, quantile, + vectorize, ) -from numpy._globals import _CopyMode - from numpy.lib._histograms_impl import ( histogram_bin_edges, histogram, @@ -561,7 +509,7 @@ from numpy.lib._polynomial_impl import ( polyfit, ) -from numpy.lib._shape_base_impl import ( +from numpy.lib._shape_base_impl import ( # type: ignore[deprecated] column_stack, row_stack, dstack, @@ -632,6 +580,7 @@ from numpy.lib._utils_impl import ( from numpy.matrixlib import ( asmatrix, bmat, + matrix, ) __all__ = [ # noqa: RUF022 @@ -712,7 +661,7 @@ __all__ = [ # noqa: RUF022 "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", "corrcoef", 
"median", "sinc", "hamming", "hanning", "bartlett", "blackman", - "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append", + "kaiser", "trapezoid", "i0", "meshgrid", "delete", "insert", "append", "interp", "quantile", # lib._twodim_base_impl.__all__ "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", @@ -726,7 +675,7 @@ __all__ = [ # noqa: RUF022 "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", "real_if_close", "typename", "mintypecode", "common_type", # lib._arraysetops_impl.__all__ - "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", # lib._ufunclike_impl.__all__ "fix", "isneginf", "isposinf", @@ -751,70 +700,20 @@ __all__ = [ # noqa: RUF022 "emath", "show_config", "__version__", "__array_namespace_info__", ] # fmt: skip -### Constrained types (for internal use only) -# Only use these for functions; never as generic type parameter. - -_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes) -_AnyShapeT = TypeVar( - "_AnyShapeT", - tuple[()], # 0-d - tuple[int], # 1-d - tuple[int, int], # 2-d - tuple[int, int, int], # 3-d - tuple[int, int, int, int], # 4-d - tuple[int, int, int, int, int], # 5-d - tuple[int, int, int, int, int, int], # 6-d - tuple[int, int, int, int, int, int, int], # 7-d - tuple[int, int, int, int, int, int, int, int], # 8-d - tuple[int, ...], # N-d -) -_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) -_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) -_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date, int | None) -_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime) -_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta) - -### Type parameters (for internal use only) - -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_T_contra = TypeVar("_T_contra", contravariant=True) -_RealT_co = TypeVar("_RealT_co", covariant=True) -_ImagT_co = TypeVar("_ImagT_co", covariant=True) +### Type parameters (with defaults); for internal use only -_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) - -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) -_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) - -_ArrayT = TypeVar("_ArrayT", bound=ndarray) _ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[integer | np.bool | object_]) -_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) -_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) - -_ShapeT = TypeVar("_ShapeT", bound=_Shape) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_1DShapeT = TypeVar("_1DShapeT", bound=_1D) -_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) -_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ... 
- -_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) -_NumberT = TypeVar("_NumberT", bound=number) -_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) -_IntegerT = TypeVar("_IntegerT", bound=integer) -_IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, covariant=True) -_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] -_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] -_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] +# intentionally invariant +_NBitT = TypeVar("_NBitT", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT1 = TypeVar("_NBitT1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT2 = TypeVar("_NBitT2", bound=NBitBase, default=_NBitT1) # pyright: ignore[reportDeprecated] _ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) -_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) -_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=py_bool, default=py_bool, covariant=True) _NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) _InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) _FlexibleItemT_co = TypeVar( @@ -824,73 +723,74 @@ _FlexibleItemT_co = TypeVar( covariant=True, ) _CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) -_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) -_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) -_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=_TD64Item, default=Any, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=_DT64Item, default=Any, covariant=True) ### Type Aliases (for internal use only) -_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]] -_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]] +type _Falsy = L[False, 0] | bool_[L[False]] +type _Truthy = L[True, 1] | bool_[L[True]] + +type _1D = tuple[int] +type _2D = tuple[int, int] +type _2Tuple[T] = tuple[T, T] + +type _ArrayUInt_co = NDArray[unsignedinteger | bool_] +type _ArrayInt_co = NDArray[integer | bool_] +type _ArrayFloat64_co = NDArray[floating[_64Bit] | float32 | float16 | integer | bool_] +type _ArrayFloat_co = NDArray[floating | integer | bool_] +type _ArrayComplex128_co = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | bool_] +type _ArrayComplex_co = NDArray[inexact | integer | bool_] +type _ArrayNumber_co = NDArray[number | bool_] +type _ArrayTD64_co = NDArray[timedelta64 | integer | bool_] -_1D: TypeAlias = tuple[int] -_2D: TypeAlias = tuple[int, int] -_2Tuple: TypeAlias = tuple[_T, _T] +type _ArrayString = ndarray[_AnyShape, dtype[str_] | dtypes.StringDType] +type _ArrayNumeric = NDArray[number | timedelta64 | object_] -_ArrayUInt_co: TypeAlias = 
-_ArrayInt_co: TypeAlias = NDArray[integer | np.bool]
-_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool]
-_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool]
-_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool]
-_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool]
-_ArrayNumber_co: TypeAlias = NDArray[number | np.bool]
-_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool]
+type _ScalarNotObject = bool_ | number | flexible | datetime64 | timedelta64
 
-_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool
-_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool
-_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co
+type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | bool_
+type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | py_bool | bool_
+type _Complex128_co = complex | number[_64Bit] | _Complex64_co
 
-_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None
-_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...]
+type _ToIndex = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None
+type _ToIndices = _ToIndex | tuple[_ToIndex, ...]
 
-_UnsignedIntegerCType: TypeAlias = type[
+type _UnsignedIntegerCType = type[
     ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64
     | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong
     | ct.c_size_t | ct.c_void_p
 ]  # fmt: skip
-_SignedIntegerCType: TypeAlias = type[
+type _SignedIntegerCType = type[
     ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64
     | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong
     | ct.c_ssize_t
 ]  # fmt: skip
-_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble]
-_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType
-_NumberCType: TypeAlias = _IntegerCType
-_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]]
+type _FloatingCType = type[ct.c_float | ct.c_double | ct.c_longdouble]
+type _IntegerCType = _UnsignedIntegerCType | _SignedIntegerCType
 
 # some commonly used builtin types that are known to result in a
 # `dtype[object_]`, when their *type* is passed to the `dtype` constructor
 # NOTE: `builtins.object` should not be included here
-_BuiltinObjectLike: TypeAlias = (
+type _BuiltinObjectLike = (
     slice | Decimal | Fraction | UUID
     | dt.date | dt.time | dt.timedelta | dt.tzinfo
     | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any]
 )  # fmt: skip
 
 # Introduce an alias for `dtype` to avoid naming conflicts.
-_dtype: TypeAlias = dtype[_ScalarT]
+type _dtype[ScalarT: generic] = dtype[ScalarT]
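
The hunk above migrates the internal aliases from `typing.TypeAlias` assignments to PEP 695 `type` statements, which scope their own type parameters and are evaluated lazily. A minimal sketch of the two spellings, using a made-up `_Pair` alias rather than anything from this diff:

    from typing import TypeAlias, TypeVar

    _T = TypeVar("_T")
    _PairOld: TypeAlias = tuple[_T, _T]  # pre-3.12 spelling: module-level TypeVar, eager evaluation

    type _PairNew[T] = tuple[T, T]       # PEP 695 (3.12+): scoped parameter, lazy evaluation
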
-_dtype: TypeAlias = dtype[_ScalarT] +type _dtype[ScalarT: generic] = dtype[ScalarT] -_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"] +type _ByteOrderChar = L["<", ">", "=", "|"] # can be anything, is case-insensitive, and only the first character matters -_ByteOrder: TypeAlias = L[ +type _ByteOrder = L[ "S", # swap the current order (default) "<", "L", "little", # little-endian ">", "B", "big", # big endian "=", "N", "native", # native order "|", "I", # ignore ] # fmt: skip -_DTypeKind: TypeAlias = L[ +type _DTypeKind = L[ "b", # boolean "i", # signed integer "u", # unsigned integer @@ -904,7 +804,7 @@ _DTypeKind: TypeAlias = L[ "V", # void "T", # unicode-string (variable-width) ] -_DTypeChar: TypeAlias = L[ +type _DTypeChar = L[ "?", # bool "b", # byte "B", # ubyte @@ -933,7 +833,7 @@ _DTypeChar: TypeAlias = L[ "c", # bytes_ (S1) "T", # StringDType ] -_DTypeNum: TypeAlias = L[ +type _DTypeNum = L[ 0, # bool 1, # byte 2, # ubyte @@ -962,35 +862,35 @@ _DTypeNum: TypeAlias = L[ 256, # user-defined 2056, # StringDType ] -_DTypeBuiltinKind: TypeAlias = L[0, 1, 2] +type _DTypeBuiltinKind = L[0, 1, 2] -_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] +type _ArrayAPIVersion = L["2021.12", "2022.12", "2023.12", "2024.12"] -_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"] +type _CastingKind = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] -_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None -_OrderACF: TypeAlias = L["A", "C", "F"] | None -_OrderCF: TypeAlias = L["C", "F"] | None +type _OrderKACF = L["K", "A", "C", "F"] | None +type _OrderACF = L["A", "C", "F"] | None +type _OrderCF = L["C", "F"] | None # noqa: PYI047 -_ModeKind: TypeAlias = L["raise", "wrap", "clip"] -_PartitionKind: TypeAlias = L["introselect"] +type _ModeKind = L["raise", "wrap", "clip"] +type _PartitionKind = L["introselect"] # in practice, only the first case-insensitive character is considered (so e.g. # "QuantumSort3000" will be interpreted as quicksort). 
-_SortKind: TypeAlias = L[
+type _SortKind = L[
     "Q", "quick", "quicksort",
     "M", "merge", "mergesort",
     "H", "heap", "heapsort",
     "S", "stable", "stablesort",
-]
-_SortSide: TypeAlias = L["left", "right"]
+]  # fmt: skip
+type _SortSide = L["left", "right"]
 
-_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co
-_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co
-_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co
-_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None
-_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None
+type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co
+type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co
+type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co
+type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | bool_ | None
+type _ConvertibleToDT64 = dt.date | int | _CharLike_co | character | number | datetime64 | bool_ | None
 
-_NDIterFlagsKind: TypeAlias = L[
+type _NDIterFlagsKind = L[
     "buffered",
     "c_index",
     "copy_if_overlap",
@@ -1005,7 +905,7 @@ _NDIterFlagsKind: TypeAlias = L[
     "reduce_ok",
     "zerosize_ok",
 ]
-_NDIterFlagsOp: TypeAlias = L[
+type _NDIterFlagsOp = L[
     "aligned",
     "allocate",
     "arraymask",
@@ -1020,30 +920,33 @@ _NDIterFlagsOp: TypeAlias = L[
     "updateifcopy",
     "virtual",
     "writeonly",
-    "writemasked"
+    "writemasked",
 ]
-_MemMapModeKind: TypeAlias = L[
+type _MemMapModeKind = L[
     "readonly", "r",
     "copyonwrite", "c",
     "readwrite", "r+",
     "write", "w+",
-]
+]  # fmt: skip
 
-_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"]
-_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"]
-_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"]
-
-_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"]
-_DayUnit: TypeAlias = L["W", "D", b"W", b"D"]
-_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit]
-_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "Ξs", b"h", b"m", b"s", b"ms", b"us"]
-_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"]
-_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit]
-_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit]
-_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit]
-_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit]
-_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex]
+type _DT64Item = dt.date | int | None
+type _TD64Item = dt.timedelta | int | None
+
+type _DT64Date = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"]
+type _DT64Now = L["NOW", "now", b"NOW", b"now"]
+type _NaTValue = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"]
+
+type _MonthUnit = L["Y", "M", b"Y", b"M"]
+type _DayUnit = L["W", "D", b"W", b"D"]
+type _DateUnit = L[_MonthUnit, _DayUnit]
+type _NativeTimeUnit = L["h", "m", "s", "ms", "us", "Ξs", b"h", b"m", b"s", b"ms", b"us"]
+type _IntTimeUnit = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"]
+type _TimeUnit = L[_NativeTimeUnit, _IntTimeUnit]
+type _NativeTD64Unit = L[_DayUnit, _NativeTimeUnit]
+type _IntTD64Unit = L[_MonthUnit, _IntTimeUnit]
+type _TD64Unit = L[_DateUnit, _TimeUnit]
+type _TimeUnitSpec[UnitT: _TD64Unit] = UnitT | tuple[UnitT, SupportsIndex]
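
These unit aliases spell out the strings that `datetime64` and `timedelta64` accept at runtime; the tuple form of `_TimeUnitSpec` denotes a stepped unit. A rough illustration (the concrete calls are my own assumption about the runtime mapping, not part of this diff):

    import numpy as np

    np.datetime64("2025-01-01", "D")               # _DayUnit: calendar day
    np.timedelta64(1500, "us")                     # _NativeTimeUnit: microseconds
    np.datetime64("NaT")                           # _NaTValue: not-a-time
    np.datetime64("2025-01-01T03:30", ("m", 15))   # _TimeUnitSpec: 15-minute steps
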
 
 ### TypedDict's (for internal use only)
@@ -1057,6 +960,26 @@ class _FormerAttrsDict(TypedDict):
 
 ### Protocols (for internal use only)
 
+@final
+@type_check_only
+class _SupportsLT(Protocol):
+    def __lt__(self, other: Any, /) -> Any: ...
+
+@final
+@type_check_only
+class _SupportsLE(Protocol):
+    def __le__(self, other: Any, /) -> Any: ...
+
+@final
+@type_check_only
+class _SupportsGT(Protocol):
+    def __gt__(self, other: Any, /) -> Any: ...
+
+@final
+@type_check_only
+class _SupportsGE(Protocol):
+    def __ge__(self, other: Any, /) -> Any: ...
+
 @type_check_only
 class _SupportsFileMethods(SupportsFlush, Protocol):
     # Protocol for representing file-like-objects accepted by `ndarray.tofile` and `fromfile`
@@ -1068,34 +991,30 @@ class _SupportsFileMethods(SupportsFlush, Protocol):
 class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ...
 
 @type_check_only
-class _SupportsItem(Protocol[_T_co]):
-    def item(self, /) -> _T_co: ...
-
-@type_check_only
-class _SupportsDLPack(Protocol[_T_contra]):
-    def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ...
+class _SupportsDLPack[StreamT](Protocol):
+    def __dlpack__(self, /, *, stream: StreamT | None = None) -> CapsuleType: ...
 
 @type_check_only
-class _HasDType(Protocol[_T_co]):
+class _HasDType[DTypeT](Protocol):  # DTypeT bound was intentionally left out
     @property
-    def dtype(self, /) -> _T_co: ...
+    def dtype(self, /) -> DTypeT: ...
 
 @type_check_only
-class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+class _HasRealAndImag[RealT, ImagT](Protocol):
     @property
-    def real(self, /) -> _RealT_co: ...
+    def real(self, /) -> RealT: ...
     @property
-    def imag(self, /) -> _ImagT_co: ...
+    def imag(self, /) -> ImagT: ...
 
 @type_check_only
-class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+class _HasTypeWithRealAndImag[RealT, ImagT](Protocol):
     @property
-    def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ...
+    def type(self, /) -> type[_HasRealAndImag[RealT, ImagT]]: ...
 
 @type_check_only
-class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+class _HasDTypeWithRealAndImag[RealT, ImagT](Protocol):
     @property
-    def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ...
+    def dtype(self, /) -> _HasTypeWithRealAndImag[RealT, ImagT]: ...
 
 @type_check_only
 class _HasDateAttributes(Protocol):
@@ -1142,9 +1061,9 @@ euler_gamma: Final[float] = ...
 pi: Final[float] = ...
 inf: Final[float] = ...
 nan: Final[float] = ...
-little_endian: Final[builtins.bool] = ...
-False_: Final[np.bool[L[False]]] = ...
-True_: Final[np.bool[L[True]]] = ...
+little_endian: Final[py_bool] = ...
+False_: Final[bool_[L[False]]] = ...
+True_: Final[bool_[L[True]]] = ...
 
 newaxis: Final[None] = None  # not in __all__
 
@@ -1169,30 +1088,32 @@ class _DTypeMeta(type):
     def _legacy(cls, /) -> bool: ...
 
 @final
-class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
-    names: tuple[builtins.str, ...] | None
+class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):  # noqa: UP046
+    names: tuple[str, ...] | None
 
     def __hash__(self) -> int: ...
 
     # `None` results in the default dtype
     @overload
     def __new__(
         cls,
-        dtype: type[float64] | None,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...
+        dtype: type[float64 | ct.c_double] | _Float64Codes | None,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...
     ) -> dtype[float64]: ...
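
This first overload is why `np.dtype(None)` type-checks as `dtype[float64]`: `None`, the `"f8"`-style codes, `float64` itself, and now `ctypes.c_double` all select the default double-precision dtype. A quick sketch of the runtime behavior this encodes (my own example, not part of the diff):

    import ctypes as ct
    import numpy as np

    assert np.dtype(None) == np.dtype("float64")  # None -> the default dtype
    assert np.dtype(ct.c_double) == np.float64    # ctypes double maps to float64
    assert np.dtype("f8") == np.float64           # a _Float64Codes spelling
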
 
     # Overload for `dtype` instances, scalar types, and instances that have a
-    # `dtype: dtype[_ScalarT]` attribute
+    # `dtype: dtype[ScalarT]` attribute
     @overload
-    def __new__(
+    def __new__[ScalarT: generic](
         cls,
-        dtype: _DTypeLike[_ScalarT],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
-    ) -> dtype[_ScalarT]: ...
+        dtype: _DTypeLike[ScalarT],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[ScalarT]: ...
 
     # Builtin types
     #
@@ -1207,54 +1128,55 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
     @overload
     def __new__(
         cls,
-        dtype: type[builtins.bool | np.bool],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: type[py_bool | bool_ | ct.c_bool] | _BoolCodes,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
-    ) -> dtype[np.bool]: ...
-    # NOTE: `_: type[int]` also accepts `type[int | bool]`
+    ) -> dtype[bool_]: ...
     @overload
     def __new__(
         cls,
-        dtype: type[int | int_ | np.bool],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: type[int],  # also accepts `type[py_bool]`
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
-    ) -> dtype[int_ | np.bool]: ...
-    # NOTE: `_: type[float]` also accepts `type[float | int | bool]`
-    # NOTE: `float64` inherits from `float` at runtime; but this isn't
-    # reflected in these stubs. So an explicit `float64` is required here.
+    ) -> dtype[int_ | Any]: ...
     @overload
     def __new__(
         cls,
-        dtype: type[float | float64 | int_ | np.bool] | None,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: type[float],  # also accepts `type[int | bool]`
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
-    ) -> dtype[float64 | int_ | np.bool]: ...
-    # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]`
+    ) -> dtype[float64 | Any]: ...
     @overload
     def __new__(
         cls,
-        dtype: type[complex | complex128 | float64 | int_ | np.bool],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: type[complex],  # also accepts `type[float | int | bool]`
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
        metadata: dict[str, Any] = ...,
-    ) -> dtype[complex128 | float64 | int_ | np.bool]: ...
+    ) -> dtype[complex128 | Any]: ...
     @overload
     def __new__(
         cls,
-        dtype: type[bytes],  # also includes `type[bytes_]`
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: type[bytes | ct.c_char] | _BytesCodes,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
     ) -> dtype[bytes_]: ...
     @overload
     def __new__(
         cls,
-        dtype: type[str],  # also includes `type[str_]`
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: type[str] | _StrCodes,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
     ) -> dtype[str_]: ...
 
     # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to
@@ -1266,9 +1188,10 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
     @overload
     def __new__(
         cls,
-        dtype: type[memoryview | void],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
     ) -> dtype[void]: ...
 
     # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`,
@@ -1276,136 +1199,242 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
     @overload
     def __new__(
         cls,
-        dtype: type[_BuiltinObjectLike | object_],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
     ) -> dtype[object_]: ...
 
-    # Unions of builtins.
+    # `unsignedinteger` string-based representations and ctypes
     @overload
     def __new__(
         cls,
-        dtype: type[bytes | str],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: _UInt8Codes | type[ct.c_uint8],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
-    ) -> dtype[character]: ...
+    ) -> dtype[uint8]: ...
     @overload
     def __new__(
         cls,
-        dtype: type[bytes | str | memoryview],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: _UInt16Codes | type[ct.c_uint16 | ct.c_ushort],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
-    ) -> dtype[flexible]: ...
+    ) -> dtype[uint16]: ...
     @overload
     def __new__(
         cls,
-        dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
+        dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
         metadata: dict[str, Any] = ...,
-    ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ...
-
-    # `unsignedinteger` string-based representations and ctypes
-    @overload
-    def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ...
-    @overload
-    def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ...
-    @overload
-    def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ...
-    @overload
-    def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ...
-    @overload
-    def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ...
+    ) -> dtype[uint32]: ...
     @overload
-    def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ...
-    @overload
-    def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ...
-    # NOTE: We're assuming here that `uint_ptr_t == size_t`,
-    # an assumption that does not hold in rare cases (same for `ssize_t`)
-    @overload
-    def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ...
+    def __new__(
+        cls,
+        dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[uint64]: ...
     @overload
-    def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ...
+    def __new__(
+        cls,
+        dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[uintp]: ...
     @overload
-    def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ...
+    def __new__(
+        cls,
+        dtype: _ULongCodes | type[ct.c_ulong],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[uint32 | uint64]: ...
 
     # `signedinteger` string-based representations and ctypes
     @overload
-    def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ...
-    @overload
-    def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ...
-    @overload
-    def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ...
-    @overload
-    def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ...
-    @overload
-    def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ...
+    def __new__(
+        cls,
+        dtype: _Int8Codes | type[ct.c_int8],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[int8]: ...
     @overload
-    def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ...
+    def __new__(
+        cls,
+        dtype: _Int16Codes | type[ct.c_int16 | ct.c_short],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[int16]: ...
     @overload
-    def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ...
+    def __new__(
+        cls,
+        dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[int32]: ...
     @overload
-    def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ...
+    def __new__(
+        cls,
+        dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[int64]: ...
     @overload
-    def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ...
+    def __new__(
+        cls,
+        dtype: _IntPCodes | type[intp | ct.c_ssize_t],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[intp]: ...
     @overload
-    def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ...
+    def __new__(
+        cls,
+        dtype: _LongCodes | type[ct.c_long],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[int32 | int64]: ...
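
`ct.c_long` (and its unsigned counterpart above) is the one C integer type here whose NumPy counterpart genuinely differs by platform, hence the `dtype[int32 | int64]` union return. A runtime check, for illustration only:

    import ctypes as ct
    import numpy as np

    # 4 on Windows (LLP64), 8 on 64-bit Linux/macOS (LP64)
    print(np.dtype(ct.c_long).itemsize)
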
 
     # `floating` string-based representations and ctypes
     @overload
-    def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ...
-    @overload
-    def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ...
-    @overload
-    def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ...
-    @overload
-    def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ...
-    @overload
-    def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ...
-    @overload
-    def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ...
-    @overload
-    def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ...
-
-    # `complexfloating` string-based representations
-    @overload
-    def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ...
-    @overload
-    def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ...
-    @overload
-    def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ...
+    def __new__(
+        cls,
+        dtype: _Float16Codes,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[float16]: ...
     @overload
-    def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ...
+    def __new__(
+        cls,
+        dtype: _Float32Codes | type[ct.c_float],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[float32]: ...
+    # float64 codes are covered by overload 1
     @overload
-    def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ...
+    def __new__(
+        cls,
+        dtype: _LongDoubleCodes | type[ct.c_longdouble],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[longdouble]: ...
+
+    # `complexfloating` string-based representations and ctypes
+    if sys.version_info >= (3, 14) and sys.platform != "win32":
+        @overload
+        def __new__(
+            cls,
+            dtype: _Complex64Codes | type[ct.c_float_complex],
+            align: py_bool = False,
+            copy: py_bool = False,
+            *,
+            metadata: dict[str, Any] = ...,
+        ) -> dtype[complex64]: ...
+        @overload
+        def __new__(
+            cls,
+            dtype: _Complex128Codes | type[ct.c_double_complex],
+            align: py_bool = False,
+            copy: py_bool = False,
+            *,
+            metadata: dict[str, Any] = ...,
+        ) -> dtype[complex128]: ...
+        @overload
+        def __new__(
+            cls,
+            dtype: _CLongDoubleCodes | type[ct.c_longdouble_complex],
+            align: py_bool = False,
+            copy: py_bool = False,
+            *,
+            metadata: dict[str, Any] = ...,
+        ) -> dtype[clongdouble]: ...
+    else:
+        @overload
+        def __new__(
+            cls,
+            dtype: _Complex64Codes,
+            align: py_bool = False,
+            copy: py_bool = False,
+            *,
+            metadata: dict[str, Any] = ...,
+        ) -> dtype[complex64]: ...
+        @overload
+        def __new__(
+            cls,
+            dtype: _Complex128Codes,
+            align: py_bool = False,
+            copy: py_bool = False,
+            *,
+            metadata: dict[str, Any] = ...,
+        ) -> dtype[complex128]: ...
+        @overload
+        def __new__(
+            cls,
+            dtype: _CLongDoubleCodes,
+            align: py_bool = False,
+            copy: py_bool = False,
+            *,
+            metadata: dict[str, Any] = ...,
+        ) -> dtype[clongdouble]: ...
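
The `sys.version_info >= (3, 14)` branch above reflects that CPython 3.14 added C complex types to `ctypes`; on older interpreters (and on Windows, per the platform guard) only the string codes are accepted. A hedged sketch of what the new branch is assumed to permit at runtime:

    import sys

    if sys.version_info >= (3, 14) and sys.platform != "win32":
        import ctypes as ct
        import numpy as np

        # assumed to map to complex128, matching the overload above
        print(np.dtype(ct.c_double_complex))
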
 
     # Miscellaneous string-based representations and ctypes
     @overload
-    def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ...
-    @overload
-    def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ...
-    @overload
-    def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ...
-    @overload
-    def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ...
-    @overload
-    def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ...
-    @overload
-    def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ...
+    def __new__(
+        cls,
+        dtype: _TD64Codes,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[timedelta64]: ...
     @overload
-    def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ...
+    def __new__(
+        cls,
+        dtype: _DT64Codes,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[datetime64]: ...
 
     # `StringDType` requires special treatment because it has no scalar type
     @overload
     def __new__(
         cls,
         dtype: dtypes.StringDType | _StringCodes,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
     ) -> dtypes.StringDType: ...
 
     # Combined char-codes and ctypes, analogous to the scalar-type hierarchy
@@ -1413,119 +1442,105 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
     def __new__(
         cls,
         dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
     ) -> dtype[unsignedinteger]: ...
     @overload
     def __new__(
         cls,
         dtype: _SignedIntegerCodes | _SignedIntegerCType,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
     ) -> dtype[signedinteger]: ...
     @overload
     def __new__(
         cls,
         dtype: _IntegerCodes | _IntegerCType,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
     ) -> dtype[integer]: ...
     @overload
     def __new__(
         cls,
         dtype: _FloatingCodes | _FloatingCType,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
    ) -> dtype[floating]: ...
     @overload
     def __new__(
         cls,
         dtype: _ComplexFloatingCodes,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
     ) -> dtype[complexfloating]: ...
     @overload
     def __new__(
         cls,
         dtype: _InexactCodes | _FloatingCType,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
     ) -> dtype[inexact]: ...
     @overload
     def __new__(
         cls,
-        dtype: _NumberCodes | _NumberCType,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
-    ) -> dtype[number]: ...
-    @overload
-    def __new__(
-        cls,
-        dtype: _CharacterCodes | type[ct.c_char],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
+        dtype: _CharacterCodes | type[bytes | str | ct.c_char],
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
     ) -> dtype[character]: ...
-    @overload
-    def __new__(
-        cls,
-        dtype: _FlexibleCodes | type[ct.c_char],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
-    ) -> dtype[flexible]: ...
-    @overload
-    def __new__(
-        cls,
-        dtype: _GenericCodes | _GenericCType,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
-    ) -> dtype[generic]: ...
 
     # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ...
     @overload
     def __new__(
         cls,
-        dtype: builtins.str,
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
+        dtype: str,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
     ) -> dtype: ...
 
     # Catch-all overload for object-likes
-    # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some
-    # (static) type `T` s.t. `object_ <: T <: builtins.object` (`<:` denotes
-    # the subtyping relation, the (gradual) typing analogue of `issubclass()`).
-    # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types
+    # NOTE: `object_ | Any` is NOT equivalent to `Any`. It is specified to behave
+    # like a "sum type" (a.k.a. variant type, discriminated union, or tagged union).
+    # So the union of a type and `Any` is not the same "union type" that all other
+    # unions are (by definition).
+    # https://typing.python.org/en/latest/spec/concepts.html#union-types
     @overload
     def __new__(
         cls,
         dtype: type[object],
-        align: builtins.bool = ...,
-        copy: builtins.bool = ...,
-        metadata: dict[builtins.str, Any] = ...,
+        align: py_bool = False,
+        copy: py_bool = False,
+        *,
+        metadata: dict[str, Any] = ...,
     ) -> dtype[object_ | Any]: ...
 
     def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
 
     @overload
-    def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ...
+    def __getitem__(self: dtype[void], key: list[str], /) -> dtype[void]: ...
     @overload
-    def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ...
+    def __getitem__(self: dtype[void], key: str | SupportsIndex, /) -> dtype: ...
 
     # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes
     @overload
-    def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ...
+    def __mul__[DTypeT: dtype](self: DTypeT, value: L[1], /) -> DTypeT: ...
     @overload
-    def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ...
+    def __mul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ...
     @overload
     def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ...
 
@@ -1533,20 +1548,20 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
     # literals as of mypy 0.902. Set the return-type to `dtype` for
     # now for non-flexible dtypes.
     @overload
-    def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ...
+    def __rmul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ...
     @overload
     def __rmul__(self, value: SupportsIndex, /) -> dtype: ...
 
-    def __gt__(self, other: DTypeLike, /) -> builtins.bool: ...
-    def __ge__(self, other: DTypeLike, /) -> builtins.bool: ...
-    def __lt__(self, other: DTypeLike, /) -> builtins.bool: ...
-    def __le__(self, other: DTypeLike, /) -> builtins.bool: ...
+    def __gt__(self, other: DTypeLike | None, /) -> py_bool: ...
+    def __ge__(self, other: DTypeLike | None, /) -> py_bool: ...
+    def __lt__(self, other: DTypeLike | None, /) -> py_bool: ...
+    def __le__(self, other: DTypeLike | None, /) -> py_bool: ...
 
     # Explicitly defined `__eq__` and `__ne__` to get around mypy's
     # `strict_equality` option; even though their signatures are
     # identical to their `object`-based counterpart
-    def __eq__(self, other: Any, /) -> builtins.bool: ...
-    def __ne__(self, other: Any, /) -> builtins.bool: ...
+    def __eq__(self, other: Any, /) -> py_bool: ...
+    def __ne__(self, other: Any, /) -> py_bool: ...
 
     @property
     def alignment(self) -> int: ...
@@ -1563,19 +1578,19 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
     @property
     def flags(self) -> int: ...
     @property
-    def hasobject(self) -> builtins.bool: ...
+    def hasobject(self) -> py_bool: ...
     @property
     def isbuiltin(self) -> _DTypeBuiltinKind: ...
     @property
-    def isnative(self) -> builtins.bool: ...
+    def isnative(self) -> py_bool: ...
     @property
-    def isalignedstruct(self) -> builtins.bool: ...
+    def isalignedstruct(self) -> py_bool: ...
     @property
     def itemsize(self) -> int: ...
     @property
     def kind(self) -> _DTypeKind: ...
     @property
-    def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ...
+    def metadata(self) -> MappingProxyType[str, Any] | None: ...
     @property
     def name(self) -> LiteralString: ...
     @property
@@ -1594,44 +1609,61 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
 
 @final
 class flatiter(Generic[_ArrayT_co]):
-    __hash__: ClassVar[None]
+    __hash__: ClassVar[None] = None  # type: ignore[assignment]  # pyright: ignore[reportIncompatibleMethodOverride]
+
     @property
-    def base(self) -> _ArrayT_co: ...
+    def base(self, /) -> _ArrayT_co: ...
     @property
-    def coords(self) -> _Shape: ...
+    def coords[ShapeT: _Shape](self: flatiter[ndarray[ShapeT]], /) -> ShapeT: ...
     @property
-    def index(self) -> int: ...
-    def copy(self) -> _ArrayT_co: ...
-    def __iter__(self) -> Self: ...
-    def __next__(self: flatiter[NDArray[_ScalarT]]) -> _ScalarT: ...
-    def __len__(self) -> int: ...
-    @overload
-    def __getitem__(
-        self: flatiter[NDArray[_ScalarT]],
-        key: int | integer | tuple[int | integer],
-    ) -> _ScalarT: ...
-    @overload
-    def __getitem__(
-        self,
-        key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType],
-    ) -> _ArrayT_co: ...
-    # TODO: `__setitem__` operates via `unsafe` casting rules, and can
-    # thus accept any type accepted by the relevant underlying `np.generic`
-    # constructor.
-    # This means that `value` must in reality be a supertype of `npt.ArrayLike`.
-    def __setitem__(
-        self,
-        key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType],
-        value: Any,
-    ) -> None: ...
-    @overload
-    def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = ..., /) -> ndarray[_1DShapeT, _DTypeT]: ...
-    @overload
-    def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DTypeT, /) -> ndarray[_1DShapeT, _DTypeT]: ...
-    @overload
-    def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = ..., /) -> ndarray[_AnyShape, _DTypeT]: ...
-    @overload
-    def __array__(self, dtype: _DTypeT, /) -> ndarray[_AnyShape, _DTypeT]: ...
+    def index(self, /) -> int: ...
+
+    # iteration
+    def __len__(self, /) -> int: ...
+    def __iter__(self, /) -> Self: ...
+    def __next__[ScalarT: generic](self: flatiter[NDArray[ScalarT]], /) -> ScalarT: ...
+
+    # indexing
+    @overload  # nd: _[()]
+    def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ...
+    @overload  # 0d; _[]
+    def __getitem__[ScalarT: generic](self: flatiter[NDArray[ScalarT]], key: int | integer, /) -> ScalarT: ...
+    @overload  # 1d; _[[*]], _[:], _[...]
+    def __getitem__[DTypeT: dtype](
+        self: flatiter[ndarray[Any, DTypeT]],
+        key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]],
+        /,
+    ) -> ndarray[tuple[int], DTypeT]: ...
+    @overload  # 2d; _[[*[*]]]
+    def __getitem__[DTypeT: dtype](
+        self: flatiter[ndarray[Any, DTypeT]],
+        key: list[list[int]],
+        /,
+    ) -> ndarray[tuple[int, int], DTypeT]: ...
+    @overload  # ?d
+    def __getitem__[DTypeT: dtype](
+        self: flatiter[ndarray[Any, DTypeT]],
+        key: NDArray[integer] | _NestedSequence[int],
+        /,
+    ) -> ndarray[_AnyShape, DTypeT]: ...
+
+    # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any
+    # type accepted by the relevant underlying `np.generic` constructor, which isn't
+    # known statically. So we cannot meaningfully annotate the value parameter.
+    def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ...
+
+    # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to
+    # avoid confusion
+    def __array__[DTypeT: dtype](
+        self: flatiter[ndarray[Any, DTypeT]],
+        dtype: None = None,
+        /,
+        *,
+        copy: None = None,
+    ) -> ndarray[tuple[int], DTypeT]: ...
+
+    # This returns a flat copy of the underlying array, not of the iterator itself
+    def copy[DTypeT: dtype](self: flatiter[ndarray[Any, DTypeT]], /) -> ndarray[tuple[int], DTypeT]: ...
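
The reworked `flatiter` overloads distinguish scalar, 1-D, 2-D, and N-d results by the key type. A small usage sketch of the runtime behavior they describe (my own example, not part of the diff):

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    it = a.flat            # flatiter over the C-order flattening
    print(it[4])           # 4 -- an integer key yields a scalar
    print(it[[0, 5]])      # [0 5] -- a list key yields a 1-D array
    print(it[...].shape)   # (6,) -- Ellipsis yields the whole flattening as 1-D
    a.flat[::2] = -1       # __setitem__ accepts slices; unsafe casting applies
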
 
 @type_check_only
 class _ArrayOrScalarCommon:
@@ -1654,7 +1686,7 @@ class _ArrayOrScalarCommon:
     @property
     def device(self) -> L["cpu"]: ...
 
-    def __bool__(self, /) -> builtins.bool: ...
+    def __bool__(self, /) -> py_bool: ...
     def __int__(self, /) -> int: ...
     def __float__(self, /) -> float: ...
     def __copy__(self) -> Self: ...
@@ -1669,23 +1701,37 @@ class _ArrayOrScalarCommon:
     def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ...
     def dumps(self) -> bytes: ...
     def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
-    def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, sep: str = ..., format: str = ...) -> None: ...
+    def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, /, sep: str = "", format: str = "%s") -> None: ...
     # generics and 0d arrays return builtin scalars
     def tolist(self) -> Any: ...
     def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ...
 
+    # NOTE: for `generic`, these two methods don't do anything
+    def fill(self, /, value: Incomplete) -> None: ...
+    def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, /, mode: _ModeKind = "raise") -> None: ...
+
+    # NOTE: even on `generic` this seems to work
+    def setflags(
+        self,
+        /,
+        *,
+        write: py_bool | None = None,
+        align: py_bool | None = None,
+        uic: py_bool | None = None,
+    ) -> None: ...
+
     @property
     def __array_interface__(self) -> dict[str, Any]: ...
     @property
     def __array_priority__(self) -> float: ...
     @property
-    def __array_struct__(self) -> CapsuleType: ...  # builtins.PyCapsule
+    def __array_struct__(self) -> CapsuleType: ...
     def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ...
 
     def __setstate__(self, state: tuple[
         SupportsIndex,  # version
         _ShapeLike,  # Shape
         _DTypeT_co,  # DType
-        np.bool,  # F-continuous
+        bool_,  # F-contiguous
         bytes | list[Any],  # Data
     ], /) -> None: ...
 
@@ -1698,38 +1744,47 @@ class _ArrayOrScalarCommon:
         kind: _SortKind | None = ...,
         order: str | Sequence[str] | None = ...,
         *,
-        stable: bool | None = ...,
-    ) -> NDArray[Any]: ...
+        stable: py_bool | None = ...,
+    ) -> NDArray[intp]: ...
 
     @overload  # axis=None (default), out=None (default), keepdims=False (default)
     def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ...
     @overload  # axis=index, out=None (default)
-    def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ...
+    def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ...
     @overload  # axis=index, out=ndarray
-    def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ...
+    def argmax[OutT: _ArrayInt_co](
+        self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False
+    ) -> OutT: ...
     @overload
-    def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ...
+    def argmax[OutT: _ArrayInt_co](
+        self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False
+    ) -> OutT: ...
 
     @overload  # axis=None (default), out=None (default), keepdims=False (default)
     def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ...
     @overload  # axis=index, out=None (default)
-    def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ...
+    def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ...
     @overload  # axis=index, out=ndarray
-    def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ...
+    def argmin[OutT: _ArrayInt_co](
+        self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False
+    ) -> OutT: ...
     @overload
-    def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ...
+    def argmin[OutT: _ArrayInt_co](
+        self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False
+    ) -> OutT: ...
 
+    # Keep in sync with `MaskedArray.round`
     @overload  # out=None (default)
     def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ...
     @overload  # out=ndarray
-    def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ...
+    def round[ArrayT: ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ...
     @overload
-    def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ...
+    def round[ArrayT: ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ...
 
     @overload  # out=None (default)
     def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ...
     @overload  # out=ndarray
-    def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ...
+    def choose[ArrayT: ndarray](self, /, choices: ArrayLike, out: ArrayT, mode: _ModeKind = "raise") -> ArrayT: ...
 
     # TODO: Annotate kwargs with an unpacked `TypedDict`
     @overload  # out: None (default)
@@ -1739,34 +1794,42 @@ class _ArrayOrScalarCommon:
     @overload
     def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ...
     @overload  # out: ndarray
-    def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
+    def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None, out: ArrayT, **kwargs: Any) -> ArrayT: ...
     @overload
-    def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
+    def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: ArrayT, **kwargs: Any) -> ArrayT: ...
     @overload
-    def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
+    def clip[ArrayT: ndarray](self, /, min: None, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ...
     @overload
-    def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
+    def clip[ArrayT: ndarray](self, /, min: None = None, *, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ...
 
     @overload
     def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ...
     @overload
-    def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ...
+    def compress[ArrayT: ndarray](self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: ArrayT) -> ArrayT: ...
     @overload
-    def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ...
+    def compress[ArrayT: ndarray](
+        self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: ArrayT
+    ) -> ArrayT: ...
 
+    # Keep in sync with `MaskedArray.cumprod`
     @overload  # out: None (default)
     def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ...
     @overload  # out: ndarray
-    def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+    def cumprod[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ...
     @overload
-    def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...
+    def cumprod[ArrayT: ndarray](
+        self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT
+    ) -> ArrayT: ...
 
+    # Keep in sync with `MaskedArray.cumsum`
     @overload  # out: None (default)
     def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ...
     @overload  # out: ndarray
-    def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+    def cumsum[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ...
     @overload
-    def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...
+    def cumsum[ArrayT: ndarray](
+        self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT
+    ) -> ArrayT: ...
 
     @overload
     def max(
         self,
         /,
@@ -1774,31 +1837,33 @@ class _ArrayOrScalarCommon:
         axis: _ShapeLike | None = None,
         out: None = None,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = True,
+        *,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
     ) -> Any: ...
     @overload
-    def max(
+    def max[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        out: ArrayT,
+        *,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
     @overload
-    def max(
+    def max[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None = None,
         *,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        out: ArrayT,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
 
     @overload
     def min(
         self,
         /,
@@ -1806,31 +1871,33 @@ class _ArrayOrScalarCommon:
         axis: _ShapeLike | None = None,
         out: None = None,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = True,
+        *,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
     ) -> Any: ...
     @overload
-    def min(
+    def min[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        out: ArrayT,
+        *,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
     @overload
-    def min(
+    def min[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None = None,
         *,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        out: ArrayT,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
 
     @overload
     def sum(
         self,
         /,
@@ -1839,33 +1906,35 @@ class _ArrayOrScalarCommon:
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         out: None = None,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = 0,
-        where: _ArrayLikeBool_co = True,
+        *,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
    ) -> Any: ...
     @overload
-    def sum(
+    def sum[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None,
         dtype: DTypeLike | None,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = 0,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        out: ArrayT,
+        *,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
     @overload
-    def sum(
+    def sum[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         *,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = 0,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        out: ArrayT,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
 
     @overload
     def prod(
         self,
         /,
@@ -1874,33 +1943,35 @@ class _ArrayOrScalarCommon:
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         out: None = None,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = 1,
-        where: _ArrayLikeBool_co = True,
+        *,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
     ) -> Any: ...
     @overload
-    def prod(
+    def prod[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None,
         dtype: DTypeLike | None,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = 1,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        out: ArrayT,
+        *,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
     @overload
-    def prod(
+    def prod[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         *,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
-        initial: _NumberLike_co = 1,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        out: ArrayT,
+        keepdims: py_bool | _NoValueType = ...,
+        initial: _NumberLike_co | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
 
     @overload
     def mean(
         self,
@@ -1908,32 +1979,32 @@ class _ArrayOrScalarCommon:
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         out: None = None,
-        keepdims: builtins.bool = False,
         *,
-        where: _ArrayLikeBool_co = True,
+        keepdims: py_bool | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
     ) -> Any: ...
     @overload
-    def mean(
+    def mean[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None,
         dtype: DTypeLike | None,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
+        out: ArrayT,
         *,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        keepdims: py_bool | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
     @overload
-    def mean(
+    def mean[ArrayT: ndarray](
         self,
         /,
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         *,
-        out: _ArrayT,
-        keepdims: builtins.bool = False,
-        where: _ArrayLikeBool_co = True,
-    ) -> _ArrayT: ...
+        out: ArrayT,
+        keepdims: py_bool | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+    ) -> ArrayT: ...
 
     @overload
     def std(
         self,
@@ -1942,38 +2013,38 @@ class _ArrayOrScalarCommon:
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         out: None = None,
         ddof: float = 0,
-        keepdims: builtins.bool = False,
         *,
-        where: _ArrayLikeBool_co = True,
-        mean: _ArrayLikeNumber_co = ...,
-        correction: float = ...,
+        keepdims: py_bool | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+        mean: _ArrayLikeNumber_co | _NoValueType = ...,
+        correction: float | _NoValueType = ...,
     ) -> Any: ...
     @overload
-    def std(
+    def std[ArrayT: ndarray](
         self,
         axis: _ShapeLike | None,
         dtype: DTypeLike | None,
-        out: _ArrayT,
+        out: ArrayT,
         ddof: float = 0,
-        keepdims: builtins.bool = False,
         *,
-        where: _ArrayLikeBool_co = True,
-        mean: _ArrayLikeNumber_co = ...,
-        correction: float = ...,
-    ) -> _ArrayT: ...
+        keepdims: py_bool | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+        mean: _ArrayLikeNumber_co | _NoValueType = ...,
+        correction: float | _NoValueType = ...,
+    ) -> ArrayT: ...
     @overload
-    def std(
+    def std[ArrayT: ndarray](
         self,
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         *,
-        out: _ArrayT,
+        out: ArrayT,
         ddof: float = 0,
-        keepdims: builtins.bool = False,
-        where: _ArrayLikeBool_co = True,
-        mean: _ArrayLikeNumber_co = ...,
-        correction: float = ...,
-    ) -> _ArrayT: ...
+        keepdims: py_bool | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+        mean: _ArrayLikeNumber_co | _NoValueType = ...,
+        correction: float | _NoValueType = ...,
+    ) -> ArrayT: ...
 
     @overload
     def var(
         self,
@@ -1982,38 +2053,38 @@ class _ArrayOrScalarCommon:
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         out: None = None,
         ddof: float = 0,
-        keepdims: builtins.bool = False,
         *,
-        where: _ArrayLikeBool_co = True,
-        mean: _ArrayLikeNumber_co = ...,
-        correction: float = ...,
+        keepdims: py_bool | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+        mean: _ArrayLikeNumber_co | _NoValueType = ...,
+        correction: float | _NoValueType = ...,
     ) -> Any: ...
     @overload
-    def var(
+    def var[ArrayT: ndarray](
         self,
         axis: _ShapeLike | None,
         dtype: DTypeLike | None,
-        out: _ArrayT,
+        out: ArrayT,
         ddof: float = 0,
-        keepdims: builtins.bool = False,
         *,
-        where: _ArrayLikeBool_co = True,
-        mean: _ArrayLikeNumber_co = ...,
-        correction: float = ...,
-    ) -> _ArrayT: ...
+        keepdims: py_bool | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+        mean: _ArrayLikeNumber_co | _NoValueType = ...,
+        correction: float | _NoValueType = ...,
+    ) -> ArrayT: ...
     @overload
-    def var(
+    def var[ArrayT: ndarray](
         self,
         axis: _ShapeLike | None = None,
         dtype: DTypeLike | None = None,
         *,
-        out: _ArrayT,
+        out: ArrayT,
         ddof: float = 0,
-        keepdims: builtins.bool = False,
-        where: _ArrayLikeBool_co = True,
-        mean: _ArrayLikeNumber_co = ...,
-        correction: float = ...,
-    ) -> _ArrayT: ...
+        keepdims: py_bool | _NoValueType = ...,
+        where: _ArrayLikeBool_co | _NoValueType = ...,
+        mean: _ArrayLikeNumber_co | _NoValueType = ...,
+        correction: float | _NoValueType = ...,
+    ) -> ArrayT: ...
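
In these reduction signatures, `keepdims`, `initial`, `where`, `mean`, and `correction` now default to NumPy's `_NoValue` sentinel (`_NoValueType`) rather than concrete values, so the stubs no longer promise defaults that the runtime dispatches on internally. A quick sketch of the out-parameter rule the overloads encode (example values are mine, not from the diff):

    import numpy as np

    x = np.arange(6).reshape(2, 3)
    r1 = x.sum()                         # plain reduction; typed as Any
    r2 = x.sum(axis=0, keepdims=True)    # shape (1, 3)
    out = np.empty((), dtype=np.intp)
    r3 = x.sum(out=out)                  # with out=..., the return type is the out array's type
    assert r3 is out
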
 
 class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     __hash__: ClassVar[None]  # type: ignore[assignment]  # pyright: ignore[reportIncompatibleMethodOverride]
@@ -2023,38 +2094,35 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     def ndim(self) -> int: ...
     @property
     def size(self) -> int: ...
+
     @property
-    def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ...
+    def real[ScalarT: generic](self: _HasDTypeWithRealAndImag[ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ...
     @real.setter
     def real(self, value: ArrayLike, /) -> None: ...
+
     @property
-    def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ...
+    def imag[ScalarT: generic](self: _HasDTypeWithRealAndImag[object, ScalarT], /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ...
     @imag.setter
     def imag(self, value: ArrayLike, /) -> None: ...
 
     def __new__(
         cls,
         shape: _ShapeLike,
-        dtype: DTypeLike = ...,
-        buffer: _SupportsBuffer | None = ...,
+        dtype: DTypeLike | None = ...,
+        buffer: Buffer | None = ...,
         offset: SupportsIndex = ...,
         strides: _ShapeLike | None = ...,
         order: _OrderKACF = ...,
     ) -> Self: ...
 
-    if sys.version_info >= (3, 12):
-        def __buffer__(self, flags: int, /) -> memoryview: ...
+    def __buffer__(self, flags: int, /) -> memoryview: ...
 
     def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
 
     @overload
-    def __array__(
-        self, dtype: None = ..., /, *, copy: bool | None = ...
-    ) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+    def __array__(self, dtype: None = None, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
     @overload
-    def __array__(
-        self, dtype: _DTypeT, /, *, copy: bool | None = ...
-    ) -> ndarray[_ShapeT_co, _DTypeT]: ...
+    def __array__[DTypeT: dtype](self, dtype: DTypeT, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ...
 
     def __array_ufunc__(
         self,
@@ -2077,28 +2145,29 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     # grant subclasses a bit more flexibility
     def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ...
 
-    def __array_wrap__(
+    def __array_wrap__[ShapeT: _Shape, DTypeT: dtype](
         self,
-        array: ndarray[_ShapeT, _DTypeT],
+        array: ndarray[ShapeT, DTypeT],
         context: tuple[ufunc, tuple[Any, ...], int] | None = ...,
-        return_scalar: builtins.bool = ...,
+        return_scalar: py_bool = ...,
         /,
-    ) -> ndarray[_ShapeT, _DTypeT]: ...
+    ) -> ndarray[ShapeT, DTypeT]: ...
 
+    # Keep in sync with `MaskedArray.__getitem__`
     @overload
     def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ...
     @overload
     def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ...
     @overload
     def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ...
+    @overload  # can be of any shape
+    def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co | _AnyShape]: ...
     @overload
-    def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype]: ...
-    @overload
-    def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ...
+    def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, dtype[void]]: ...
@overload # flexible | object_ | bool def __setitem__( - self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + self: ndarray[Any, dtype[flexible | object_ | bool_] | dtypes.StringDType], key: _ToIndices, value: object, /, @@ -2145,21 +2214,28 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @property def ctypes(self) -> _ctypes[int]: ... + + # @property def shape(self) -> _ShapeT_co: ... @shape.setter + @deprecated("In-place shape modification will be deprecated in NumPy 2.5.", category=PendingDeprecationWarning) def shape(self, value: _ShapeLike) -> None: ... + + # @property def strides(self) -> _Shape: ... @strides.setter + @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") def strides(self, value: _ShapeLike) -> None: ... - def byteswap(self, inplace: builtins.bool = ...) -> Self: ... - def fill(self, value: Any) -> None: ... + + # + def byteswap(self, inplace: py_bool = ...) -> Self: ... @property def flat(self) -> flatiter[Self]: ... @overload # use the same output type as that of the underlying `generic` - def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + def item[T](self: NDArray[generic[T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> T: ... @overload # special casing for `StringDType`, which has no scalar type def item( self: ndarray[Any, dtypes.StringDType], @@ -2168,41 +2244,38 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): *args: SupportsIndex, ) -> str: ... + # keep in sync with `ma.MaskedArray.tolist` @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` - def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... + def tolist[T](self: ndarray[tuple[Never], dtype[generic[T]]], /) -> Any: ... @overload - def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... + def tolist[T](self: ndarray[tuple[()], dtype[generic[T]]], /) -> T: ... @overload - def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... + def tolist[T](self: ndarray[tuple[int], dtype[generic[T]]], /) -> list[T]: ... @overload - def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... + def tolist[T](self: ndarray[tuple[int, int], dtype[generic[T]]], /) -> list[list[T]]: ... @overload - def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... + def tolist[T](self: ndarray[tuple[int, int, int], dtype[generic[T]]], /) -> list[list[list[T]]]: ... @overload def tolist(self, /) -> Any: ... @overload - def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... + def resize(self, new_shape: _ShapeLike, /, *, refcheck: py_bool = True) -> None: ... @overload - def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... - - def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... + def resize(self, /, *new_shape: SupportsIndex, refcheck: py_bool = True) -> None: ... + # keep in sync with `ma.MaskedArray.squeeze` def squeeze( self, + /, axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... - def swapaxes( - self, - axis1: SupportsIndex, - axis2: SupportsIndex, - ) -> ndarray[_AnyShape, _DTypeT_co]: ... 
+ def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... @overload def transpose(self, axes: _ShapeLike | None, /) -> Self: ... @overload - def transpose(self, *axes: SupportsIndex) -> Self: ... + def transpose(self, /, *axes: SupportsIndex) -> Self: ... @overload def all( @@ -2212,7 +2285,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: L[False, 0] = False, *, where: _ArrayLikeBool_co = True - ) -> np.bool: ... + ) -> bool_: ... @overload def all( self, @@ -2221,25 +2294,25 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> np.bool | NDArray[np.bool]: ... + ) -> bool_ | NDArray[bool_]: ... @overload - def all( + def all[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def all( + def all[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def any( @@ -2249,7 +2322,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: L[False, 0] = False, *, where: _ArrayLikeBool_co = True - ) -> np.bool: ... + ) -> bool_: ... @overload def any( self, @@ -2258,32 +2331,32 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> np.bool | NDArray[np.bool]: ... + ) -> bool_ | NDArray[bool_]: ... @overload - def any( + def any[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def any( + def any[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload def partition( self, - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: None = None, @@ -2291,8 +2364,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def partition( self: NDArray[void], - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, @@ -2302,8 +2375,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def argpartition( self, - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: None = None, @@ -2311,142 +2384,175 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def argpartition( self: NDArray[void], - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, ) -> NDArray[intp]: ... - # + # keep in sync with `ma.MaskedArray.diagonal` def diagonal( self, - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, ) -> ndarray[_AnyShape, _DTypeT_co]: ... # 1D + 1D returns a scalar; # all other with at least 1 non-0D array return an ndarray. 
@overload - def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ... + def dot(self, b: _ScalarLike_co, /, out: None = None) -> NDArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... @overload - def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... - - # `nonzero()` is deprecated for 0d arrays/generics - def nonzero(self) -> tuple[NDArray[intp], ...]: ... + def dot[ArrayT: ndarray](self, b: ArrayLike, /, out: ArrayT) -> ArrayT: ... - # `put` is technically available to `generic`, - # but is pointless as `generic`s are immutable - def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + # `nonzero()` raises for 0d arrays/generics + def nonzero(self) -> tuple[ndarray[tuple[int], dtype[intp]], ...]: ... @overload - def searchsorted( # type: ignore[misc] + def searchsorted( self, # >= 1D array v: _ScalarLike_co, # 0D array-like - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> intp: ... @overload def searchsorted( self, # >= 1D array v: ArrayLike, - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> NDArray[intp]: ... def sort( self, - axis: SupportsIndex = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + /, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., + stable: py_bool | None = None, ) -> None: ... + # Keep in sync with `MaskedArray.trace` @overload def trace( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, ) -> Any: ... @overload - def trace( + def trace[ArrayT: ndarray]( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: _ArrayT = ..., - ) -> _ArrayT: ... + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + @overload + def trace[ArrayT: ndarray]( + self, # >= 2D array + /, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... @overload - def take( # type: ignore[misc] - self: NDArray[_ScalarT], + def take[ScalarT: generic]( + self: NDArray[ScalarT], indices: _IntLike_co, + /, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: ArrayT, mode: _ModeKind = ..., - ) -> _ArrayT: ... - + ) -> ArrayT: ... 
@overload - def repeat( + def take[ArrayT: ndarray]( self, - repeats: _ArrayLikeInt_co, - axis: None = None, - ) -> ndarray[tuple[int], _DTypeT_co]: ... + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None, + out: ArrayT, + mode: _ModeKind = ..., + ) -> ArrayT: ... + + # keep in sync with `ma.MaskedArray.repeat` @overload - def repeat( - self, - repeats: _ArrayLikeInt_co, - axis: SupportsIndex, - ) -> ndarray[_AnyShape, _DTypeT_co]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: None = None) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex) -> ndarray[_AnyShape, _DTypeT_co]: ... + # keep in sync with `ma.MaskedArray.flatten` and `ma.MaskedArray.ravel` def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + # Keep in sync with `MaskedArray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) - def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: py_bool | None = None) -> Self: ... @overload # (empty_sequence) - def reshape( # type: ignore[overload-overlap] # mypy false positive + def reshape( # mypy false positive self, shape: Sequence[Never], /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) # up to 8-d - def reshape( + def reshape[ + AnyShapeT: ( + tuple[()], # 0d + tuple[int], # 1d + tuple[int, int], # 2d + tuple[int, int, int], # 3d + tuple[int, int, int, int], # 4d + tuple[int, int, int, int, int], # 5d + tuple[int, int, int, int, int, int], # 6d + tuple[int, int, int, int, int, int, int], # 7d + tuple[int, int, int, int, int, int, int, int], # 8d + ) + ]( self, - shape: _AnyShapeT, + shape: AnyShapeT, /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[_AnyShapeT, _DTypeT_co]: ... + copy: py_bool | None = None, + ) -> ndarray[AnyShapeT, _DTypeT_co]: ... @overload # (index) def reshape( self, @@ -2454,7 +2560,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int], _DTypeT_co]: ... @overload # (index, index) def reshape( @@ -2464,7 +2570,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int], _DTypeT_co]: ... @overload # (index, index, index) def reshape( @@ -2475,7 +2581,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ... @overload # (index, index, index, index) def reshape( @@ -2487,7 +2593,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ... 
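Editor's note, not part of the diff: the `reshape` overloads being rewritten here inline the old constrained `_AnyShapeT` TypeVar as a PEP 695 parameter, spelling out the fixed-length tuple forms from 0-D to 8-D. A rough usage sketch under these stubs, with illustrative names:

```python
import numpy as np

a = np.arange(12)

b = a.reshape((3, 4))    # fixed-length tuple: static shape type tuple[int, int]
c = a.reshape(2, 3, 2)   # per-axis overload: static shape type tuple[int, int, int]
d = a.reshape([12])      # Sequence[SupportsIndex]: falls back to _AnyShape

assert b.shape == (3, 4) and c.shape == (2, 3, 2) and d.shape == (12,)
```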
@overload # (int, *(index, ...)) def reshape( @@ -2496,7 +2602,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *shape: SupportsIndex, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload # (sequence[index]) def reshape( @@ -2505,166 +2611,169 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def astype( + def astype[ScalarT: generic]( self, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def astype( self, - dtype: DTypeLike, + dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., ) -> ndarray[_ShapeT_co, dtype]: ... # @overload # () def view(self, /) -> Self: ... @overload # (dtype: T) - def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... + def view[DTypeT: dtype](self, /, dtype: DTypeT | _HasDType[DTypeT]) -> ndarray[_ShapeT_co, DTypeT]: ... @overload # (dtype: dtype[T]) - def view(self, /, dtype: _DTypeLike[_ScalarT]) -> NDArray[_ScalarT]: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT]) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload # (type: T) - def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, *, type: type[ArrayT]) -> ArrayT: ... @overload # (_: T) - def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, dtype: type[ArrayT]) -> ArrayT: ... @overload # (dtype: ?) def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... - @overload # (dtype: ?, type: type[T]) - def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... + @overload # (dtype: ?, type: T) + def view[ArrayT: ndarray](self, /, dtype: DTypeLike, type: type[ArrayT]) -> ArrayT: ... - def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... + def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload - def getfield(self, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> NDArray[ScalarT]: ... @overload - def getfield(self, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... - def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... + def __complex__(self: NDArray[number | bool_ | object_], /) -> complex: ... def __len__(self) -> int: ... - def __contains__(self, value: object, /) -> builtins.bool: ... - - @overload # == 1-d & object_ - def __iter__(self: ndarray[tuple[int], dtype[object_]], /) -> Iterator[Any]: ... - @overload # == 1-d - def __iter__(self: ndarray[tuple[int], dtype[_ScalarT]], /) -> Iterator[_ScalarT]: ... 
+ def __contains__(self, value: object, /) -> py_bool: ... + + # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns + # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. + # This way the bug only occurs for 9-D arrays, which are probably not very common. + @overload + def __iter__( + self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / + ) -> Iterator[Any]: ... + @overload # == 1-d & dtype[T \ object_] + def __iter__[ScalarT: _ScalarNotObject](self: ndarray[tuple[int], dtype[ScalarT]], /) -> Iterator[ScalarT]: ... + @overload # == 1-d & StringDType + def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... @overload # >= 2-d - def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ... + def __iter__[DTypeT: dtype]( + self: ndarray[tuple[int, int, *tuple[int, ...]], DTypeT], / + ) -> Iterator[ndarray[_AnyShape, DTypeT]]: ... @overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... # @overload - def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __lt__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __le__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __le__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... 
+ def __le__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __gt__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __ge__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # Unary ops # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex64DType], /) -> ndarray[ShapeT, dtypes.Float32DType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex128DType], /) -> ndarray[ShapeT, dtypes.Float64DType]: ... 
# @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[ShapeT, dtypes.LongDoubleDType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... @overload - def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... + def __abs__[ShapeT: _Shape, NBitT: NBitBase]( + self: ndarray[ShapeT, dtype[complexfloating[NBitT]]], / + ) -> ndarray[ShapeT, dtype[floating[NBitT]]]: ... @overload - def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... + def __abs__[ArrayT: NDArray[bool_ | integer | floating | timedelta64 | object_]](self: ArrayT, /) -> ArrayT: ... - def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 - def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 - def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + def __invert__[ArrayT: NDArray[bool_ | integer | object_]](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 + def __neg__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 + def __pos__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 # Binary ops # TODO: Support the "1d @ 1d -> scalar" case @overload - def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __matmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __matmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2674,11 +2783,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... 
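Editor's note, not part of the diff: the `__matmul__` overloads above encode the usual promotion ladder (bool, then each concrete number family, then abstract `number`) and drop the `# type: ignore[overload-overlap]` comments, which the PEP 695 rewrite evidently makes unnecessary. A small runtime illustration of the ladder, with hypothetical variable names:

```python
import numpy as np

i = np.arange(4).reshape(2, 2)              # signedinteger family
f = np.ones((2, 2), dtype=np.float64)
c = np.ones((2, 2), dtype=np.complex128)

assert np.issubdtype((i @ i).dtype, np.signedinteger)  # int @ int stays signed
assert (i @ f).dtype == np.float64                     # int @ float64 -> float64
assert (f @ c).dtype == np.complex128                  # float64 @ complex128 -> complex128
```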
@overload @@ -2689,11 +2798,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload # signature equivalent to __matmul__ - def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __rmatmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmatmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2703,11 +2812,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload @@ -2718,21 +2827,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __mod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __mod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __mod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -2743,21 +2854,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload # signature equivalent to __mod__ - def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rmod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rmod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -2768,55 +2881,68 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: int | bool_, / + ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... @overload - def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... 
# type: ignore[overload-overlap] + def __divmod__(self: NDArray[bool_], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __divmod__[ScalarT: floating | integer]( + self: NDArray[bool_], rhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... @overload - def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... @overload def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... @overload def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload # signature equivalent to __divmod__ - def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: int | bool_, / + ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... @overload - def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: NDArray[bool_], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[bool_], lhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... @overload - def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... @overload def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... 
@overload def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + # Keep in sync with `MaskedArray.__add__` @overload - def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2826,15 +2952,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -2856,14 +2982,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ - def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... 
@overload - def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2873,15 +3000,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -2903,14 +3030,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__sub__` @overload - def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
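Editor's note, not part of the diff: note the `NoReturn` overloads retained above for `bool_ - bool_`. They statically reject boolean subtraction, which NumPy also refuses at runtime. A quick demonstration on NumPy 2.x:

```python
import numpy as np

a = np.array([True, False])
try:
    a - a  # rejected statically (NoReturn) and with a TypeError at runtime
except TypeError:
    pass

x = a ^ a  # the supported spelling; stays boolean
assert x.dtype == np.bool_
```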
@overload @@ -2920,15 +3048,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -2940,14 +3068,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rsub__` @overload - def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2957,15 +3086,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
@overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -2977,14 +3106,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__mul__` @overload - def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2994,13 +3124,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... 
@overload @@ -3018,14 +3148,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ - def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3035,13 +3166,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload @@ -3059,6 +3190,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__truediv__` @overload def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3090,6 +3222,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rtruediv__` @overload def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3119,22 +3252,25 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
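Editor's note, not part of the diff: the `__truediv__`/`__rtruediv__` hunks above keep the rule that integer-family (and boolean) operands promote to `float64` under true division. Sketch, with illustrative names:

```python
import numpy as np

a = np.arange(1, 5)   # signed integers
q = a / 2             # statically NDArray[float64] per the first overload
assert q.dtype == np.float64
```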
+ # Keep in sync with `MaskedArray.__floordiv__` @overload - def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __floordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -3148,24 +3284,27 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rfloordiv__` @overload - def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rfloordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... @overload @@ -3175,14 +3314,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__pow__` @overload - def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3194,11 +3334,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / ) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... @overload def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... 
@overload @@ -3208,14 +3348,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + # Keep in sync with `MaskedArray.__rpow__` @overload - def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3227,11 +3368,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / ) -> NDArray[complex128]: ... @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... @overload def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload @@ -3242,9 +3383,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
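`__pow__` follows the same ladder; a small sketch (illustrative, assuming a NumPy 2.x runtime with these stubs):

import numpy as np
from numpy.typing import NDArray

b: NDArray[np.bool_] = np.ones(3, dtype=np.bool_)
p = b ** True    # bool ** bool is declared NDArray[int8], matching runtime promotion

c: NDArray[np.complex64] = np.ones(3, dtype=np.complex64)
w = 2 ** c       # the `ScalarT: number` __rpow__ overload keeps complex64 for a plain int base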
@overload @@ -3253,9 +3394,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3264,9 +3405,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3275,9 +3416,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3286,9 +3427,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3297,9 +3438,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
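For the shift operators only the stale `# type: ignore[misc]` comments are dropped; the declared types are unchanged. Illustrative:

import numpy as np
from numpy.typing import NDArray

u: NDArray[np.uint8] = np.arange(4, dtype=np.uint8)
s = u << np.uint8(1)   # `_ArrayUInt_co` overload: typed NDArray[unsignedinteger] (runtime dtype stays uint8)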
@overload def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3308,9 +3449,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3319,9 +3460,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3330,9 +3471,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3341,9 +3482,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3359,209 +3500,141 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # object and its value is >= 0 # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't # work, as this will lead to `false negatives` when using these inplace ops. + + # += + @overload # type: ignore[misc] + def __iadd__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __iadd__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __iadd__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __iadd__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... @overload - def __iadd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__[ArrayT: NDArray[bytes_]](self: ArrayT, other: _ArrayLikeBytes_co, /) -> ArrayT: ... @overload - def __iadd__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], - other: _ArrayLikeStr_co | _ArrayLikeString_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> ArrayT: ... @overload - def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... - # - @overload - def __isub__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # -= + @overload # type: ignore[misc] + def __isub__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __isub__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __isub__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... @overload - def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... - # - @overload - def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # *= + @overload # type: ignore[misc] + def __imul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __imul__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
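The in-place operators now bind the whole array type (`ArrayT`) instead of reassembling `ndarray[_ShapeT_co, _DTypeT_co]`, so `x += ...` keeps the exact static type of `x`. A minimal sketch with illustrative names:

import numpy as np
from numpy.typing import NDArray

x: NDArray[np.float64] = np.zeros(3)
x += 1    # `ArrayT: NDArray[inexact]` overload: x stays NDArray[float64]

y: NDArray[np.int32] = np.zeros(3, dtype=np.int32)
y += 2    # `ArrayT: NDArray[number]` with `_ArrayLikeInt_co`: y stays NDArray[int32]
# y += 0.5 is rejected: no overload lets an integer array absorb a float,
# which mirrors the runtime same-kind casting error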
+ def __imul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __imul__( - self: ndarray[Any, dtype[signedinteger | character] | dtypes.StringDType], - other: _ArrayLikeInt_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__[ArrayT: NDArray[number | character]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + # @= + @overload # type: ignore[misc] + def __imatmul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ipow__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ipow__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... - # + # **= + @overload # type: ignore[misc] + def __ipow__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __itruediv__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... - # keep in sync with `__imod__` + # /= + @overload # type: ignore[misc] + def __itruediv__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __ifloordiv__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __itruediv__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... 
@overload - def __ifloordiv__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __itruediv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + + # //= + # keep in sync with `__imod__` + @overload # type: ignore[misc] + def __ifloordiv__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__[ArrayT: NDArray[floating | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + # %= # keep in sync with `__ifloordiv__` + @overload # type: ignore[misc] + def __imod__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imod__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imod__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__[ArrayT: NDArray[floating]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imod__( - self: NDArray[timedelta64], - other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__[ArrayT: NDArray[timedelta64]](self: ArrayT, other: _ArrayLike[timedelta64], /) -> ArrayT: ... @overload - def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + # <<= # keep in sync with `__irshift__` + @overload # type: ignore[misc] + def __ilshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ilshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ilshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ilshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + # >>= # keep in sync with `__ilshift__` + @overload # type: ignore[misc] + def __irshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __irshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __irshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __irshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + # &= # keep in sync with `__ixor__` and `__ior__` + @overload # type: ignore[misc] + def __iand__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... 
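`//=` and `%=` are declared pairwise-identical, as the `keep in sync` notes demand; sketched usage (illustrative):

import numpy as np
from numpy.typing import NDArray

k: NDArray[np.int64] = np.arange(6)
k //= 2   # `ArrayT: NDArray[integer]` overload returns the same array type
k %= 4    # `__imod__` mirrors `__ifloordiv__`

d = np.array([10, 20], dtype="timedelta64[s]")
d %= np.timedelta64(7, "s")   # dedicated `NDArray[timedelta64]` overload of `__imod__`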
@overload - def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __iand__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iand__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + # ^= # keep in sync with `__iand__` and `__ior__` + @overload # type: ignore[misc] + def __ixor__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ixor__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ixor__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ixor__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ixor__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + # |= # keep in sync with `__iand__` and `__ixor__` + @overload # type: ignore[misc] + def __ior__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - - # - @overload - def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[unsignedinteger], other: _ArrayLikeUInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imatmul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # def __dlpack__( @@ -3571,7 +3644,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): stream: int | Any | None = None, max_version: tuple[int, int] | None = None, dl_device: tuple[int, int] | None = None, - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> CapsuleType: ... def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... 
@@ -3587,14 +3660,70 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __init__(self, *args: Any, **kwargs: Any) -> None: ... - def __hash__(self) -> int: ... + def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... + + def __buffer__(self, flags: int, /) -> memoryview: ... + @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... @overload - def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + def __array__[DTypeT: dtype](self, dtype: DTypeT, /) -> ndarray[tuple[()], DTypeT]: ... + + # + @overload + def __getitem__(self, key: tuple[()], /) -> Self: ... + @overload + def __getitem__( + self, key: EllipsisType | tuple[EllipsisType], / + ) -> ndarray[tuple[()], dtype[Self]]: ... + @overload + def __getitem__( + self, key: None | tuple[None], / + ) -> ndarray[tuple[int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None], / + ) -> ndarray[tuple[int, int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None, None], / + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... + @overload # Limited support for (None,) * N > 3 + def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... + + # + @overload + def __array_wrap__[ShapeT: _Shape, DTypeT: dtype]( + self, + array: ndarray[ShapeT, DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None, + return_scalar: L[False], + /, + ) -> ndarray[ShapeT, DTypeT]: ... + @overload + def __array_wrap__[ScalarT: generic]( + self, + array: ndarray[tuple[()], dtype[ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ScalarT: ... + @overload + def __array_wrap__[ShapeT: tuple[int, *tuple[int, ...]], DTypeT: dtype]( + self, + array: ndarray[ShapeT, DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ndarray[ShapeT, DTypeT]: ... + @overload + def __array_wrap__[ShapeT: _Shape, ScalarT: generic]( + self, + array: ndarray[ShapeT, dtype[ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ScalarT | ndarray[ShapeT, dtype[ScalarT]]: ... @property def base(self) -> None: ... @@ -3613,106 +3742,131 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def item(self, /) -> _ItemT_co: ... @overload def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ... + @override def tolist(self, /) -> _ItemT_co: ... - def byteswap(self, inplace: L[False] = ...) -> Self: ... + # NOTE: these technically exist, but will always raise when called + def trace( # type: ignore[misc] + self: Never, + /, + offset: L[0] = 0, + axis1: L[0] = 0, + axis2: L[1] = 1, + dtype: None = None, + out: None = None, + ) -> Never: ... + def diagonal(self: Never, /, offset: L[0] = 0, axis1: L[0] = 0, axis2: L[1] = 1) -> Never: ... # type: ignore[misc] + def swapaxes(self: Never, axis1: Never, axis2: Never, /) -> Never: ... # type: ignore[misc] + def sort(self: Never, /, axis: L[-1] = -1, kind: None = None, order: None = None, *, stable: None = None) -> Never: ... # type: ignore[misc] + def nonzero(self: Never, /) -> Never: ... 
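The new `generic.__getitem__` overloads mirror 0-d array indexing. What they declare, sketched (comments give the stub-level types):

import numpy as np

x = np.float64(3.5)
s = x[()]     # `tuple[()]` returns the scalar itself (Self)
a0 = x[...]   # Ellipsis wraps it into `ndarray[tuple[()], dtype[float64]]`
a1 = x[None]  # each None inserts an axis: shape (1,), or (1, 1) for x[None, None]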
# type: ignore[misc] + def setfield(self: Never, val: Never, /, dtype: Never, offset: L[0] = 0) -> None: ... # type: ignore[misc] + def searchsorted(self: Never, v: Never, /, side: L["left"] = "left", sorter: None = None) -> Never: ... # type: ignore[misc] + # NOTE: this won't raise, but won't do anything either @overload - def astype( + def resize(self, /, *, refcheck: py_bool = True) -> None: ... + @overload + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: py_bool = True) -> None: ... + + # + def byteswap(self, /, inplace: L[False] = False) -> Self: ... + + # + @overload + def astype[ScalarT: generic]( self, - dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> _ScalarT: ... + /, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: py_bool = True, + copy: py_bool | _CopyMode = True, + ) -> ScalarT: ... @overload def astype( self, - dtype: DTypeLike, - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> Any: ... + /, + dtype: DTypeLike | None, + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: py_bool = True, + copy: py_bool | _CopyMode = True, + ) -> Incomplete: ... # NOTE: `view` will perform a 0D->scalar cast, # thus the array `type` is irrelevant to the output type @overload - def view(self, type: type[NDArray[Any]] = ...) -> Self: ... + def view(self, type: type[ndarray] = ...) -> Self: ... @overload - def view( - self, - dtype: _DTypeLike[_ScalarT], - type: type[NDArray[Any]] = ..., - ) -> _ScalarT: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], type: type[ndarray] = ...) -> ScalarT: ... @overload - def view( - self, - dtype: DTypeLike, - type: type[NDArray[Any]] = ..., - ) -> Any: ... + def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... @overload - def getfield( - self, - dtype: _DTypeLike[_ScalarT], - offset: SupportsIndex = ... - ) -> _ScalarT: ... + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> ScalarT: ... @overload - def getfield( - self, - dtype: DTypeLike, - offset: SupportsIndex = ... - ) -> Any: ... + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _IntLike_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> Self: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Self]: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - out: _ArrayT = ..., - mode: _ModeKind = ..., - ) -> _ArrayT: ... + /, + axis: SupportsIndex | None = None, + *, + out: ArrayT, + mode: _ModeKind = "raise", + ) -> ArrayT: ... + @overload + def take[ArrayT: ndarray]( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None, + out: ArrayT, + mode: _ModeKind = "raise", + ) -> ArrayT: ...
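`astype`, `view`, and `getfield` keep their 0-d-to-scalar semantics, now with inline type parameters and concrete defaults instead of `...`. Illustrative:

import numpy as np

v = np.int16(-2)
f = v.astype(np.float32)   # `_DTypeLike[ScalarT]` overload: returns a float32 scalar
u = v.view(np.uint16)      # 0-d view cast reinterpreting the same two bytes
assert u == np.uint16(65534)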
- def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... - @overload # (() | []) + @overload # (()) def reshape( self, shape: tuple[()] | list[Never], /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> Self: ... - @overload # ((1, *(1, ...))@_ShapeT) - def reshape( + @overload # (ShapeT: (index, ...)) + def reshape[ShapeT: tuple[int, *tuple[int, ...]]]( self, - shape: _1NShapeT, + shape: ShapeT, /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[_1NShapeT, dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[ShapeT, dtype[Self]]: ... @overload # (Sequence[index, ...]) # not recommended def reshape( self, @@ -3720,8 +3874,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> NDArray[Self] | Any: ... @overload # _(index) def reshape( self, @@ -3729,8 +3883,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int], dtype[Self]]: ... @overload # _(index, index) def reshape( self, @@ -3739,8 +3893,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int], dtype[Self]]: ... @overload # _(index, index, index) def reshape( self, @@ -3750,8 +3904,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... @overload # _(index, index, index, index) def reshape( self, @@ -3762,8 +3916,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int, int], dtype[Self]]: ... @overload # _(index, index, index, index, index, *index) # ndim >= 5 def reshape( self, @@ -3775,8 +3929,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *sizes6_: SupportsIndex, order: _OrderACF = "C", - copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ... + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int, int, int, *tuple[int, ...]], dtype[Self]]: ... def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... @@ -3789,28 +3943,28 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True - ) -> np.bool: ... 
+ where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + ) -> bool_: ... @overload - def all( + def all[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + ) -> ScalarT: ... @overload - def all( + def all[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + ) -> ScalarT: ... @overload def any( @@ -3820,59 +3974,80 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True - ) -> np.bool: ... + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + ) -> bool_: ... @overload - def any( + def any[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + ) -> ScalarT: ... @overload - def any( + def any[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + ) -> ScalarT: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property def dtype(self) -> _dtype[Self]: ... -class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): - @abstractmethod - def __init__(self, value: _NumberItemT_co, /) -> None: ... +class number(generic[_NumberItemT_co], Generic[_NBitT, _NumberItemT_co]): + @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` + def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... def __neg__(self) -> Self: ... def __pos__(self) -> Self: ... def __abs__(self) -> Self: ... - __add__: _NumberOp - __radd__: _NumberOp - __sub__: _NumberOp - __rsub__: _NumberOp - __mul__: _NumberOp - __rmul__: _NumberOp - __floordiv__: _NumberOp - __rfloordiv__: _NumberOp - __pow__: _NumberOp - __rpow__: _NumberOp - __truediv__: _NumberOp - __rtruediv__: _NumberOp - - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + def __add__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __radd__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __sub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... 
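Replacing the `_ComparisonOp*` aliases with explicit overloads spells out the scalar/array split. A sketch of what a checker now infers (illustrative):

import numpy as np

n = np.int32(3)
flag = n < 4.5            # `_NumberLike_co` operand: np.bool_
mask = n < np.arange(5)   # array-like operand: NDArray[np.bool_]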
+ def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property @@ -3882,276 +4057,1019 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def real(self) -> Self: ... @property - def imag(self) -> np.bool[L[False]]: ... + def imag(self) -> bool_[L[False]]: ... + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __new__(cls, value: Never, /) -> bool_[py_bool]: ... @overload - def __init__(self: np.bool[L[False]], /) -> None: ... - @overload - def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ... + def __new__(cls, value: _Falsy = ..., /) -> bool_[L[False]]: ... @overload - def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ... + def __new__(cls, value: _Truthy, /) -> bool_[L[True]]: ... @overload - def __init__(self, value: object, /) -> None: ... + def __new__(cls, value: object, /) -> bool_[py_bool]: ... + + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... def __bool__(self, /) -> _BoolItemT_co: ... + @overload - def __int__(self: np.bool[L[False]], /) -> L[0]: ... + def __int__(self: bool_[L[False]], /) -> L[0]: ... @overload - def __int__(self: np.bool[L[True]], /) -> L[1]: ... + def __int__(self: bool_[L[True]], /) -> L[1]: ... @overload def __int__(self, /) -> L[0, 1]: ... + def __abs__(self) -> Self: ... @overload - def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ... + def __invert__(self: bool_[L[False]], /) -> bool_[L[True]]: ... @overload - def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ... + def __invert__(self: bool_[L[True]], /) -> bool_[L[False]]: ... @overload - def __invert__(self, /) -> np.bool: ... + def __invert__(self, /) -> bool_: ... 
- __add__: _BoolOp[np.bool] - __radd__: _BoolOp[np.bool] - __sub__: _BoolSub - __rsub__: _BoolSub - __mul__: _BoolOp[np.bool] - __rmul__: _BoolOp[np.bool] - __truediv__: _BoolTrueDiv - __rtruediv__: _BoolTrueDiv - __floordiv__: _BoolOp[int8] - __rfloordiv__: _BoolOp[int8] - __pow__: _BoolOp[int8] - __rpow__: _BoolOp[int8] - - __lshift__: _BoolBitOp[int8] - __rlshift__: _BoolBitOp[int8] - __rshift__: _BoolBitOp[int8] - __rrshift__: _BoolBitOp[int8] + @overload + def __add__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __add__(self, other: py_bool | bool_, /) -> bool_: ... + @overload + def __add__(self, other: int, /) -> int_: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... @overload - def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + def __radd__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + def __radd__(self, other: py_bool, /) -> bool_: ... @overload - def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + def __radd__(self, other: int, /) -> int_: ... @overload - def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __radd__(self, other: float, /) -> float64: ... @overload - def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + def __radd__(self, other: complex, /) -> complex128: ... + @overload - def __and__(self, other: int, /) -> np.bool | intp: ... - __rand__ = __and__ + def __sub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __sub__(self, other: int, /) -> int_: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... @overload - def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + def __rsub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rsub__(self, other: int, /) -> int_: ... @overload - def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + def __rsub__(self, other: float, /) -> float64: ... @overload - def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __rsub__(self, other: complex, /) -> complex128: ... + @overload - def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __mul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + def __mul__(self, other: py_bool | bool_, /) -> bool_: ... @overload - def __xor__(self, other: int, /) -> np.bool | intp: ... - __rxor__ = __xor__ + def __mul__(self, other: int, /) -> int_: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... @overload - def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + def __rmul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __rmul__(self, other: py_bool, /) -> bool_: ... @overload - def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + def __rmul__(self, other: int, /) -> int_: ... @overload - def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... 
+ def __rmul__(self, other: float, /) -> float64: ... @overload - def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rmul__(self, other: complex, /) -> complex128: ... + @overload - def __or__(self, other: int, /) -> np.bool | intp: ... - __ror__ = __or__ + def __pow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... + @overload + def __pow__(self, other: py_bool | bool_, mod: None = None, /) -> int8: ... + @overload + def __pow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... - __mod__: _BoolMod - __rmod__: _BoolMod - __divmod__: _BoolDivMod - __rdivmod__: _BoolDivMod + @overload + def __rpow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... + @overload + def __rpow__(self, other: py_bool, mod: None = None, /) -> int8: ... + @overload + def __rpow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + @overload + def __truediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __truediv__(self, other: float | integer | bool_, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... -# NOTE: This should _not_ be `Final` or a `TypeAlias` -bool_ = bool + @overload + def __rtruediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... -# NOTE: The `object_` constructor returns the passed object, so instances with type -# `object_` cannot exists (at runtime). -# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't -# be made generic. -@final -class object_(_RealMixin, generic): @overload - def __new__(cls, nothing_to_see_here: None = None, /) -> None: ... # type: ignore[misc] + def __floordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __new__(cls, stringy: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + def __floordiv__(self, other: py_bool | bool_, /) -> int8: ... @overload - def __new__(cls, array: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + def __floordiv__(self, other: int, /) -> int_: ... @overload - def __new__(cls, sequence: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] + def __floordiv__(self, other: float, /) -> float64: ... + @overload - def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] - @overload # catch-all + def __rfloordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rfloordiv__(self, other: py_bool, /) -> int8: ... + @overload + def __rfloordiv__(self, other: int, /) -> int_: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + + # keep in sync with __floordiv__ + @overload + def __mod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... 
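The expanded `np.bool` overloads encode the promotion rules directly rather than routing through the removed `_BoolOp`/`_BoolSub`/`_BoolTrueDiv` aliases. Sketched with `np.True_` (illustrative):

import numpy as np

t = np.True_
a = t + 1       # Python int operand: int_
b = t + 0.5     # Python float operand: float64
c = t / t       # true division of bools: float64
d = t ** True   # bool ** bool: int8, matching the array case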
+ @overload + def __mod__(self, other: py_bool | bool_, /) -> int8: ... + @overload + def __mod__(self, other: int, /) -> int_: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + + # keep in sync with __rfloordiv__ + @overload + def __rmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rmod__(self, other: py_bool, /) -> int8: ... + @overload + def __rmod__(self, other: int, /) -> int_: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + + # keep in sync with __mod__ + @overload + def __divmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... + @overload + def __divmod__(self, other: py_bool | bool_, /) -> _2Tuple[int8]: ... + @overload + def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... + @overload + def __rdivmod__(self, other: py_bool, /) -> _2Tuple[int8]: ... + @overload + def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + + @overload + def __lshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __lshift__(self, other: py_bool | bool_, /) -> int8: ... + @overload + def __lshift__(self, other: int, /) -> int_: ... + + @overload + def __rlshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rlshift__(self, other: py_bool, /) -> int8: ... + @overload + def __rlshift__(self, other: int, /) -> int_: ... + + # keep in sync with __lshift__ + @overload + def __rshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rshift__(self, other: py_bool | bool_, /) -> int8: ... + @overload + def __rshift__(self, other: int, /) -> int_: ... + + # keep in sync with __rlshift__ + @overload + def __rrshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rrshift__(self, other: py_bool, /) -> int8: ... + @overload + def __rrshift__(self, other: int, /) -> int_: ... + + @overload + def __and__(self: bool_[L[False]], other: py_bool | bool_, /) -> bool_[L[False]]: ... + @overload + def __and__(self, other: L[False] | bool_[L[False]], /) -> bool_[L[False]]: ... + @overload + def __and__(self, other: L[True] | bool_[L[True]], /) -> Self: ... + @overload + def __and__(self, other: py_bool | bool_, /) -> bool_: ... + @overload + def __and__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __and__(self, other: int, /) -> bool_ | intp: ... + __rand__ = __and__ + + @overload + def __xor__[ItemT: py_bool](self: bool_[L[False]], other: ItemT | bool_[ItemT], /) -> bool_[ItemT]: ... + @overload + def __xor__(self: bool_[L[True]], other: L[True] | bool_[L[True]], /) -> bool_[L[False]]: ... + @overload + def __xor__(self, other: L[False] | bool_[L[False]], /) -> Self: ... + @overload + def __xor__(self, other: py_bool | bool_, /) -> bool_: ... + @overload + def __xor__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __xor__(self, other: int, /) -> bool_ | intp: ... + __rxor__ = __xor__ + + @overload + def __or__(self: bool_[L[True]], other: py_bool | bool_, /) -> bool_[L[True]]: ... + @overload + def __or__(self, other: L[False] | bool_[L[False]], /) -> Self: ... 
+ @overload + def __or__(self, other: L[True] | bool_[L[True]], /) -> bool_[L[True]]: ... + @overload + def __or__(self, other: py_bool | bool_, /) -> bool_: ... + @overload + def __or__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __or__(self, other: int, /) -> bool_ | intp: ... + __ror__ = __or__ + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... + +# NOTE: This should _not_ be `Final[_]`, `_: TypeAlias`, or `type _` +bool_ = bool + +# NOTE: The `object_` constructor returns the passed object, so instances with type +# `object_` cannot exist (at runtime). +# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't +# be made generic. +@final +class object_(_RealMixin, generic): + @overload + def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] + @overload + def __new__[AnyStrT: (LiteralString, str, bytes)](cls, value: AnyStrT, /) -> AnyStrT: ... # type: ignore[misc] + @overload + def __new__[ShapeT: _Shape](cls, value: ndarray[ShapeT, Any], /) -> ndarray[ShapeT, dtype[Self]]: ... # type: ignore[misc] + @overload + def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] + @overload + def __new__[T](cls, value: T, /) -> T: ... # type: ignore[misc] + @overload # catch-all def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] - def __init__(self, value: object = ..., /) -> None: ... + def __hash__(self, /) -> int: ... def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ def __call__(self, /, *args: object, **kwargs: object) -> Any: ... - if sys.version_info >= (3, 12): - def __release_buffer__(self, buffer: memoryview, /) -> None: ... + def __release_buffer__(self, buffer: memoryview, /) -> None: ... + +class integer(_IntegralMixin, _RoundMixin, number[_NBitT, int]): + @abstractmethod + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes + def bit_count(self, /) -> int: ... + def __index__(self, /) -> int: ... + def __invert__(self, /) -> Self: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: float | integer, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ...
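The `object_.__new__` overloads encode the pass-through behaviour described in the NOTE above; a quick sketch:

import numpy as np

assert np.object_() is None    # the no-argument form returns None
s = np.object_("text")         # strings come back unchanged: `s` is a plain str
arr = np.object_([1, "a"])     # len+getitem sequences become object arrays
assert arr.dtype == np.dtype(object)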
+ + def __floordiv__(self, value: _IntLike_co, /) -> integer: ... + def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ... + def __mod__(self, value: _IntLike_co, /) -> integer: ... + def __rmod__(self, value: _IntLike_co, /) -> integer: ... + def __divmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + def __rdivmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + + # Ensure that objects annotated as `integer` support bit-wise operations + def __lshift__(self, other: _IntLike_co, /) -> integer: ... + def __rlshift__(self, other: _IntLike_co, /) -> integer: ... + def __rshift__(self, other: _IntLike_co, /) -> integer: ... + def __rrshift__(self, other: _IntLike_co, /) -> integer: ... + def __and__(self, other: _IntLike_co, /) -> integer: ... + def __rand__(self, other: _IntLike_co, /) -> integer: ... + def __or__(self, other: _IntLike_co, /) -> integer: ... + def __ror__(self, other: _IntLike_co, /) -> integer: ... + def __xor__(self, other: _IntLike_co, /) -> integer: ... + def __rxor__(self, other: _IntLike_co, /) -> integer: ... + +class signedinteger(integer[_NBitT]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: signedinteger, /) -> signedinteger: ... 
+ @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: integer, /) -> signedinteger: ... 
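+
+    # Illustrative sketch (not part of the stub): with the shift overloads
+    # above, an `int`, `int8`, `bool_`, or same-type operand preserves `Self`,
+    # while any other `integer` falls back to the abstract `signedinteger`:
+    #
+    #     from typing import reveal_type
+    #     import numpy as np
+    #     reveal_type(np.int16(1) << np.int16(3))  # -> int16 (Self)
+    #     reveal_type(np.int16(1) << np.int64(3))  # -> signedinteger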
+ + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: integer, /) -> signedinteger: ... + +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int_ = intp +long = signedinteger[_NBitLong] +longlong = signedinteger[_NBitLongLong] + +class unsignedinteger(integer[_NBitT]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | uint8 | bool_, /) -> Self: ... 
+ @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | uint8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | uint8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... 
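+
+    # Illustrative sketch (not part of the stub) of how the modular-division
+    # overloads above should resolve for a type checker:
+    #
+    #     from typing import reveal_type
+    #     import numpy as np
+    #     reveal_type(np.uint8(7) % np.uint8(3))   # -> uint8 (Self)
+    #     reveal_type(np.uint8(7) % 3.0)           # -> float64
+    #     reveal_type(np.uint8(7) % np.uint32(3))  # -> unsignedinteger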
+ + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | uint8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | uint8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __lshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rlshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rrshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __and__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rand__(self, other: signedinteger, /) -> signedinteger: ... -class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): - @abstractmethod - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __xor__(self, other: signedinteger, /) -> signedinteger: ... - # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes - def bit_count(self, /) -> int: ... - def __index__(self, /) -> int: ... - def __invert__(self, /) -> Self: ... + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rxor__(self, other: signedinteger, /) -> signedinteger: ... - __truediv__: _IntTrueDiv[_NBit] - __rtruediv__: _IntTrueDiv[_NBit] - def __mod__(self, value: _IntLike_co, /) -> integer: ... 
- def __rmod__(self, value: _IntLike_co, /) -> integer: ... - # Ensure that objects annotated as `integer` support bit-wise operations - def __lshift__(self, other: _IntLike_co, /) -> integer: ... - def __rlshift__(self, other: _IntLike_co, /) -> integer: ... - def __rshift__(self, other: _IntLike_co, /) -> integer: ... - def __rrshift__(self, other: _IntLike_co, /) -> integer: ... - def __and__(self, other: _IntLike_co, /) -> integer: ... - def __rand__(self, other: _IntLike_co, /) -> integer: ... - def __or__(self, other: _IntLike_co, /) -> integer: ... - def __ror__(self, other: _IntLike_co, /) -> integer: ... - def __xor__(self, other: _IntLike_co, /) -> integer: ... - def __rxor__(self, other: _IntLike_co, /) -> integer: ... + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __or__(self, other: signedinteger, /) -> signedinteger: ... -class signedinteger(integer[_NBit1]): - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... - - __add__: _SignedIntOp[_NBit1] - __radd__: _SignedIntOp[_NBit1] - __sub__: _SignedIntOp[_NBit1] - __rsub__: _SignedIntOp[_NBit1] - __mul__: _SignedIntOp[_NBit1] - __rmul__: _SignedIntOp[_NBit1] - __floordiv__: _SignedIntOp[_NBit1] - __rfloordiv__: _SignedIntOp[_NBit1] - __pow__: _SignedIntOp[_NBit1] - __rpow__: _SignedIntOp[_NBit1] - __lshift__: _SignedIntBitOp[_NBit1] - __rlshift__: _SignedIntBitOp[_NBit1] - __rshift__: _SignedIntBitOp[_NBit1] - __rrshift__: _SignedIntBitOp[_NBit1] - __and__: _SignedIntBitOp[_NBit1] - __rand__: _SignedIntBitOp[_NBit1] - __xor__: _SignedIntBitOp[_NBit1] - __rxor__: _SignedIntBitOp[_NBit1] - __or__: _SignedIntBitOp[_NBit1] - __ror__: _SignedIntBitOp[_NBit1] - __mod__: _SignedIntMod[_NBit1] - __rmod__: _SignedIntMod[_NBit1] - __divmod__: _SignedIntDivMod[_NBit1] - __rdivmod__: _SignedIntDivMod[_NBit1] + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __ror__(self, other: signedinteger, /) -> signedinteger: ... -int8 = signedinteger[_8Bit] -int16 = signedinteger[_16Bit] -int32 = signedinteger[_32Bit] -int64 = signedinteger[_64Bit] +uint8 = unsignedinteger[_8Bit] +uint16 = unsignedinteger[_16Bit] +uint32 = unsignedinteger[_32Bit] +uint64 = unsignedinteger[_64Bit] -byte = signedinteger[_NBitByte] -short = signedinteger[_NBitShort] -intc = signedinteger[_NBitIntC] -intp = signedinteger[_NBitIntP] -int_ = intp -long = signedinteger[_NBitLong] -longlong = signedinteger[_NBitLongLong] +ubyte = unsignedinteger[_NBitByte] +ushort = unsignedinteger[_NBitShort] +uintc = unsignedinteger[_NBitIntC] +uintp = unsignedinteger[_NBitIntP] +uint = uintp +ulong = unsignedinteger[_NBitLong] +ulonglong = unsignedinteger[_NBitLongLong] -class unsignedinteger(integer[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... 
- - __add__: _UnsignedIntOp[_NBit1] - __radd__: _UnsignedIntOp[_NBit1] - __sub__: _UnsignedIntOp[_NBit1] - __rsub__: _UnsignedIntOp[_NBit1] - __mul__: _UnsignedIntOp[_NBit1] - __rmul__: _UnsignedIntOp[_NBit1] - __floordiv__: _UnsignedIntOp[_NBit1] - __rfloordiv__: _UnsignedIntOp[_NBit1] - __pow__: _UnsignedIntOp[_NBit1] - __rpow__: _UnsignedIntOp[_NBit1] - __lshift__: _UnsignedIntBitOp[_NBit1] - __rlshift__: _UnsignedIntBitOp[_NBit1] - __rshift__: _UnsignedIntBitOp[_NBit1] - __rrshift__: _UnsignedIntBitOp[_NBit1] - __and__: _UnsignedIntBitOp[_NBit1] - __rand__: _UnsignedIntBitOp[_NBit1] - __xor__: _UnsignedIntBitOp[_NBit1] - __rxor__: _UnsignedIntBitOp[_NBit1] - __or__: _UnsignedIntBitOp[_NBit1] - __ror__: _UnsignedIntBitOp[_NBit1] - __mod__: _UnsignedIntMod[_NBit1] - __rmod__: _UnsignedIntMod[_NBit1] - __divmod__: _UnsignedIntDivMod[_NBit1] - __rdivmod__: _UnsignedIntDivMod[_NBit1] - -uint8: TypeAlias = unsignedinteger[_8Bit] -uint16: TypeAlias = unsignedinteger[_16Bit] -uint32: TypeAlias = unsignedinteger[_32Bit] -uint64: TypeAlias = unsignedinteger[_64Bit] - -ubyte: TypeAlias = unsignedinteger[_NBitByte] -ushort: TypeAlias = unsignedinteger[_NBitShort] -uintc: TypeAlias = unsignedinteger[_NBitIntC] -uintp: TypeAlias = unsignedinteger[_NBitIntP] -uint: TypeAlias = uintp -ulong: TypeAlias = unsignedinteger[_NBitLong] -ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] - -class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): +class inexact(number[_NBitT, _InexactItemT_co], Generic[_NBitT, _InexactItemT_co]): @abstractmethod - def __init__(self, value: _InexactItemT_co | None = ..., /) -> None: ... - -class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): - def __init__(self, value: _ConvertibleToFloat | None = ..., /) -> None: ... - - __add__: _FloatOp[_NBit1] - __radd__: _FloatOp[_NBit1] - __sub__: _FloatOp[_NBit1] - __rsub__: _FloatOp[_NBit1] - __mul__: _FloatOp[_NBit1] - __rmul__: _FloatOp[_NBit1] - __truediv__: _FloatOp[_NBit1] - __rtruediv__: _FloatOp[_NBit1] - __floordiv__: _FloatOp[_NBit1] - __rfloordiv__: _FloatOp[_NBit1] - __pow__: _FloatOp[_NBit1] - __rpow__: _FloatOp[_NBit1] - __mod__: _FloatMod[_NBit1] - __rmod__: _FloatMod[_NBit1] - __divmod__: _FloatDivMod[_NBit1] - __rdivmod__: _FloatDivMod[_NBit1] + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + +class floating(_RealMixin, _RoundMixin, inexact[_NBitT, float]): + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: integer | floating, /) -> floating: ... + @overload + def __add__(self, other: float, /) -> Self: ... + @overload + def __add__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: integer | floating, /) -> floating: ... + @overload + def __radd__(self, other: float, /) -> Self: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: integer | floating, /) -> floating: ... + @overload + def __sub__(self, other: float, /) -> Self: ... 
+ @overload + def __sub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: integer | floating, /) -> floating: ... + @overload + def __rsub__(self, other: float, /) -> Self: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: integer | floating, /) -> floating: ... + @overload + def __mul__(self, other: float, /) -> Self: ... + @overload + def __mul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmul__(self, other: float, /) -> Self: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | float16 | uint8 | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | float16 | uint8 | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __truediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __truediv__(self, other: float, /) -> Self: ... + @overload + def __truediv__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rtruediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rtruediv__(self, other: float, /) -> Self: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating: ... + + # modular division ops + + @overload + def __floordiv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __floordiv__(self, other: float, /) -> Self: ... + + @overload + def __rfloordiv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rfloordiv__(self, other: float, /) -> Self: ... + + @overload + def __mod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: integer | floating, /) -> floating: ... + @overload + def __mod__(self, other: float, /) -> Self: ... 
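+
+    # Illustrative sketch (not part of the stub): per the overloads above, a
+    # bare Python `float` preserves `Self`, while a float of a different
+    # precision widens to the abstract `floating`:
+    #
+    #     from typing import reveal_type
+    #     import numpy as np
+    #     reveal_type(np.float32(1.0) % 2.0)              # -> float32 (Self)
+    #     reveal_type(np.float32(1.0) % np.float64(2.0))  # -> floating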
+ + @overload + def __rmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmod__(self, other: float, /) -> Self: ... + + @overload + def __divmod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[Self]: ... + + @overload + def __rdivmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes - def is_integer(self, /) -> builtins.bool: ... + def is_integer(self, /) -> py_bool: ... def as_integer_ratio(self, /) -> tuple[int, int]: ... -float16: TypeAlias = floating[_16Bit] -float32: TypeAlias = floating[_32Bit] +float16 = floating[_16Bit] +float32 = floating[_32Bit] # either a C `double`, `float`, or `longdouble` class float64(floating[_64Bit], float): # type: ignore[misc] - def __new__(cls, x: _ConvertibleToFloat | None = ..., /) -> Self: ... - - # @property def itemsize(self) -> L[8]: ... @property @@ -4163,236 +5081,250 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @property def imag(self) -> Self: ... def conjugate(self) -> Self: ... - def __getformat__(self, typestr: L["double", "float"], /) -> str: ... def __getnewargs__(self, /) -> tuple[float]: ... + @classmethod + def __getformat__(cls, typestr: L["double", "float"], /) -> str: ... # undocumented + # float64-specific operator overrides - @overload + # NOTE: Mypy reports [misc] errors about "unsafely overlapping signatures" for the + # reflected methods. But since they are identical to the non-reflected versions, + # these errors appear to be false positives. + + @overload # type: ignore[override] def __add__(self, other: _Float64_co, /) -> float64: ... @overload def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __add__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __radd__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __radd__(self, other: _Float64_co, /) -> float64: ... - @overload - def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __radd__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __radd__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __sub__(self, other: _Float64_co, /) -> float64: ... @overload def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
@overload - def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rsub__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rsub__(self, other: _Float64_co, /) -> float64: ... - @overload - def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rsub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rsub__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __mul__(self, other: _Float64_co, /) -> float64: ... @overload def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rmul__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rmul__(self, other: _Float64_co, /) -> float64: ... - @overload - def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rmul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rmul__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Float64_co, /) -> float64: ... @overload def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: _Float64_co, /) -> float64: ... - @overload - def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rtruediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... 
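+
+    # Illustrative sketch (not part of the stub): the generic overloads above
+    # keep the precision of a known `complexfloating` operand in the result,
+    # so a type checker should infer roughly
+    #
+    #     from typing import reveal_type
+    #     import numpy as np
+    #     reveal_type(np.float64(1.0) + np.complex64(1j))  # -> complexfloating[_32Bit | _64Bit]
+    #     reveal_type(np.float64(1.0) + 1j)                # -> float64 | complex128
+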
- @overload + @overload # type: ignore[override] def __floordiv__(self, other: _Float64_co, /) -> float64: ... @overload def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __floordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rfloordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... @overload def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload - def __pow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... # type: ignore[misc] @overload - def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... - @overload - def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @overload - def __rpow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__[NBitT: NBitBase]( + self, other: complexfloating[NBitT], mod: None = None, / + ) -> complexfloating[NBitT | _64Bit]: ... @overload def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... - def __mod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] - def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] + def __mod__(self, other: _Float64_co, /) -> float64: ... + def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] - def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] - def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] + def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... + def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... 
# type: ignore[misc] -half: TypeAlias = floating[_NBitHalf] -single: TypeAlias = floating[_NBitSingle] -double: TypeAlias = floating[_NBitDouble] -longdouble: TypeAlias = floating[_NBitLongDouble] +half = float16 +single = float32 +double = float64 +longdouble = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. # It is used to clarify why `complex128`s precision is `_64Bit`, the latter # describing the two 64 bit floats representing its real and imaginary component -class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): +class complexfloating(inexact[_NBitT1, complex], Generic[_NBitT1, _NBitT2]): @overload - def __init__( - self, - real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., - imag: complex | SupportsFloat | SupportsIndex = ..., + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = 0, + imag: complex | SupportsFloat | SupportsIndex = 0, /, - ) -> None: ... + ) -> Self: ... @overload - def __init__(self, real: _ConvertibleToComplex | None = ..., /) -> None: ... + def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ... @property - def real(self) -> floating[_NBit1]: ... # type: ignore[override] + def real(self) -> floating[_NBitT1]: ... @property - def imag(self) -> floating[_NBit2]: ... # type: ignore[override] + def imag(self) -> floating[_NBitT2]: ... # NOTE: `__complex__` is technically defined in the concrete subtypes def __complex__(self, /) -> complex: ... - def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override] + def __abs__(self, /) -> floating[_NBitT1 | _NBitT2]: ... # type: ignore[override] + @overload # type: ignore[override] + def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... - @overload - def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... - @overload - def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload - def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... - @overload - def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __add__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + @overload # type: ignore[override] + def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... - @overload - def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... - @overload - def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload - def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... - @overload - def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __radd__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... 
@overload - def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __radd__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + @overload # type: ignore[override] + def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __sub__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rsub__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rsub__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __mul__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + @overload # type: ignore[override] + def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rmul__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rmul__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __truediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... 
+ def __rtruediv__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rtruediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... - @overload - def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload # type: ignore[override] + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload def __pow__( self, other: complex | float64 | complex128, mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complex128: ... - @overload - def __pow__( - self, other: number[_NBit], mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload - def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + ) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... - @overload - def __rpow__( - self, other: number[_NBit], mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __pow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... -complex64: TypeAlias = complexfloating[_32Bit, _32Bit] - -class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] + @overload # type: ignore[override] + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __new__( - cls, - real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., - imag: complex | SupportsFloat | SupportsIndex = ..., - /, - ) -> Self: ... + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __new__(cls, real: _ConvertibleToComplex | None = ..., /) -> Self: ... + def __rpow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... - # +complex64 = complexfloating[_32Bit] + +class complex128(complexfloating[_64Bit, _64Bit], complex): @property def itemsize(self) -> L[16]: ... @property @@ -4408,41 +5340,39 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc def __getnewargs__(self, /) -> tuple[float, float]: ... # complex128-specific operator overrides - @overload + @overload # type: ignore[override] def __add__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __radd__(self, other: _Complex128_co, /) -> complex128: ... + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __sub__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rsub__(self, other: _Complex128_co, /) -> complex128: ... 
+ def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __mul__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rmul__(self, other: _Complex128_co, /) -> complex128: ... + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... @overload - def __pow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... + def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... # type: ignore[override] -csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] -cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] -clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] +csingle = complex64 +cdouble = complex128 +clongdouble = complexfloating[_NBitLongDouble] class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): @property @@ -4451,26 +5381,26 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def nbytes(self) -> L[8]: ... @overload - def __init__(self, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> None: ... + def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... @overload - def __init__(self: timedelta64[L[0]], /) -> None: ... + def __new__(cls, /) -> timedelta64[L[0]]: ... @overload - def __init__(self: timedelta64[None], value: _NaTValue | None, format: _TimeUnitSpec, /) -> None: ... + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec[_TD64Unit], /) -> timedelta64[None]: ... @overload - def __init__(self: timedelta64[L[0]], value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... @overload - def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... @overload - def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... 
+ def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... @overload - def __init__( - self: timedelta64[dt.timedelta], + def __new__( + cls, value: dt.timedelta | _IntLike_co, format: _TimeUnitSpec[_NativeTD64Unit] = ..., /, - ) -> None: ... + ) -> timedelta64[dt.timedelta]: ... @overload - def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> timedelta64: ... # inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -4484,149 +5414,206 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __pos__(self, /) -> Self: ... def __abs__(self, /) -> Self: ... + # + @overload + def __add__(self: timedelta64[Never], x: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... @overload def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... @overload + def __add__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... + @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + @overload def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... @overload def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... @overload - def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + def __add__[AnyDateOrTimeT: (dt.datetime, dt.date, dt.timedelta)]( + self: timedelta64[dt.timedelta], x: AnyDateOrTimeT, / + ) -> AnyDateOrTimeT: ... @overload - def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... - @overload - def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + def __add__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], x: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... __radd__ = __add__ + # + @overload + def __sub__(self: timedelta64[Never], b: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ... + def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __sub__(self: timedelta64[int | dt.timedelta], b: timedelta64[Never], /) -> timedelta64: ... @overload - def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ... - __rmul__ = __mul__ + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __sub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], b: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... + # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. + # This confuses mypy, so we ignore the [misc] errors it reports. 
@overload - def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + def __rsub__(self: timedelta64[Never], a: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... @overload - def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... @overload - def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + def __rsub__[AnyDateT: (dt.datetime, dt.date)](self: timedelta64[dt.timedelta], a: AnyDateT, /) -> AnyDateT: ... @overload - def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + def __rsub__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[dt.timedelta], a: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... @overload - def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + def __rsub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( # type: ignore[misc] + self: timedelta64[AnyItemT], a: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... @overload - def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] @overload - def __mod__(self, x: timedelta64, /) -> timedelta64: ... + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... - # the L[0] makes __mod__ non-commutative, which the first two overloads reflect + # + @overload + def __mul__(self: timedelta64[Never], x: _FloatLike_co, /) -> timedelta64: ... + @overload + def __mul__(self: timedelta64[None], x: _FloatLike_co, /) -> timedelta64[None]: ... + @overload + def __mul__(self, x: _IntLike_co, /) -> Self: ... + @overload + def __mul__(self, x: float | floating, /) -> timedelta64[_TD64ItemT_co | None]: ... + @overload + def __mul__(self, x: _FloatLike_co, /) -> timedelta64: ... + __rmul__ = __mul__ + + # keep in sync with __divmod__ + @overload + def __mod__(self: timedelta64[Never], x: timedelta64[dt.timedelta], /) -> timedelta64: ... + @overload + def __mod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... + @overload + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + @overload + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... @overload - def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload - def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], /) -> timedelta64[dt.timedelta | None]: ... @overload - def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload + def __mod__(self, x: timedelta64, /) -> timedelta64: ... 
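+
+    # Illustrative sketch (not part of the stub): modulo by a zero (or NaT)
+    # timedelta can only yield NaT, which the `timedelta64[L[0] | None]`
+    # overload above encodes as `timedelta64[None]`:
+    #
+    #     from typing import reveal_type
+    #     import numpy as np
+    #     reveal_type(np.timedelta64(10, "s") % np.timedelta64(0, "s"))  # -> timedelta64[None]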
+ + # keep in sync with __rdivmod__ def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... - @overload - def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... - @overload - def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # keep in sync with __mod__ @overload + def __divmod__( + self: timedelta64[Never], x: timedelta64[Never] | timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64]: ... + @overload + def __divmod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> tuple[int64, timedelta64]: ... + @overload def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... @overload def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64[dt.timedelta | None]]: ... @overload - def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... # keep in sync with __rmod__ - @overload - def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... - @overload - def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... - @overload - def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... - @overload - def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... - @overload def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... - @overload - def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... - @overload - def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... @overload - def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + def __truediv__(self, b: timedelta64, /) -> float64: ... @overload - def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... @overload - def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + def __truediv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload - def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + def __truediv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: int | integer, / + ) -> timedelta64[AnyItemT]: ... 
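[Editor's note — illustrative sketch, not part of the patch. The `__divmod__` overloads mirror `__mod__`: the quotient is an int64 and the remainder keeps the timedelta64 item type:]

    import numpy as np

    q, r = divmod(np.timedelta64(7, "h"), np.timedelta64(2, "h"))
    assert (q, r) == (3, np.timedelta64(1, "h"))  # (int64, timedelta64)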
@overload
- def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ...
+ def __truediv__[AnyItemT: (dt.timedelta, int, None)](
+ self: timedelta64[AnyItemT], b: float | floating, /
+ ) -> timedelta64[AnyItemT | None]: ...
@overload
- def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ...
+ def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ...

@overload
- def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ...
+ def __rtruediv__(self, a: timedelta64, /) -> float64: ...
+ @overload
+ def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ...
+
@overload
- def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ...
+ def __floordiv__(self, b: timedelta64, /) -> int64: ...
@overload
- def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ...
+ def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ...
@overload
- def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ...
+ def __floordiv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ...
@overload
- def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ...
+ def __floordiv__[AnyItemT: (dt.timedelta, int, None)](
+ self: timedelta64[AnyItemT], b: int | integer, /
+ ) -> timedelta64[AnyItemT]: ...
@overload
- def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ...
+ def __floordiv__[AnyItemT: (dt.timedelta, int, None)](
+ self: timedelta64[AnyItemT], b: float | floating, /
+ ) -> timedelta64[AnyItemT | None]: ...
@overload
- def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ...
+ def __rfloordiv__(self, a: timedelta64, /) -> int64: ...
@overload
- def __truediv__(self, b: timedelta64, /) -> float64: ...
+ def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ...
+
@overload
- def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ...
+ def __lt__(self, other: _TD64Like_co, /) -> bool_: ...
@overload
- def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ...
+ def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ...
@overload
- def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ...
+ def __lt__(self, other: _SupportsGT, /) -> bool_: ...
+
@overload
- def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ...
+ def __le__(self, other: _TD64Like_co, /) -> bool_: ...
@overload
- def __rtruediv__(self, a: timedelta64, /) -> float64: ...
+ def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ...
+ @overload
+ def __le__(self, other: _SupportsGE, /) -> bool_: ...
@overload
- def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ...
+ def __gt__(self, other: _TD64Like_co, /) -> bool_: ...
@overload
- def __floordiv__(self, b: timedelta64, /) -> int64: ...
+ def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ...
@overload
- def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ...
+ def __gt__(self, other: _SupportsLT, /) -> bool_: ...
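[Editor's note — illustrative sketch, not part of the patch. True division of two timedelta64 values yields a float64 ratio, floor division an int64, while dividing by a plain number stays a timedelta64:]

    import numpy as np

    assert np.timedelta64(1, "D") / np.timedelta64(1, "h") == 24.0   # float64
    assert np.timedelta64(1, "D") // np.timedelta64(7, "h") == 3     # int64
    assert np.timedelta64(90, "m") // 2 == np.timedelta64(45, "m")   # stays timedelta64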
+ @overload
- def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ...
+ def __ge__(self, other: _TD64Like_co, /) -> bool_: ...
@overload
- def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ...
+ def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ...
@overload
- def __rfloordiv__(self, a: timedelta64, /) -> int64: ...
-
- __lt__: _ComparisonOpLT[_TD64Like_co, _ArrayLikeTD64_co]
- __le__: _ComparisonOpLE[_TD64Like_co, _ArrayLikeTD64_co]
- __gt__: _ComparisonOpGT[_TD64Like_co, _ArrayLikeTD64_co]
- __ge__: _ComparisonOpGE[_TD64Like_co, _ArrayLikeTD64_co]
+ def __ge__(self, other: _SupportsLE, /) -> bool_: ...

class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
@property
@@ -4635,30 +5622,38 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
def nbytes(self) -> L[8]: ...

@overload
- def __init__(self, value: datetime64[_DT64ItemT_co], /) -> None: ...
+ def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ...
@overload
- def __init__(self: datetime64[_AnyDT64Arg], value: _AnyDT64Arg, /) -> None: ...
+ def __new__[AnyItemT: (dt.datetime, dt.date, None)](cls, value: AnyItemT, /) -> datetime64[AnyItemT]: ...
@overload
- def __init__(self: datetime64[None], value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> None: ...
+ def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec[_TD64Unit] = ..., /) -> datetime64[None]: ...
@overload
- def __init__(self: datetime64[dt.datetime], value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> None: ...
+ def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ...
@overload
- def __init__(self: datetime64[dt.date], value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> None: ...
+ def __new__(cls, value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> datetime64[dt.date]: ...
@overload
- def __init__(self: datetime64[int], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ...
+ def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ...
@overload
- def __init__(
- self: datetime64[dt.datetime], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], /
- ) -> None: ...
+ def __new__( # type: ignore[overload-cannot-match]
+ cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], /
+ ) -> datetime64[dt.datetime]: ...
@overload
- def __init__(self: datetime64[dt.date], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> None: ...
+ def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... # type: ignore[overload-cannot-match]
@overload
- def __init__(self, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> None: ...
+ def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> Self: ...
+
+ #
+ def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ...

+ #
+ @overload
+ def __add__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ...
+ @overload
+ def __add__(self, x: _IntLike_co, /) -> Self: ...
@overload
- def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
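[Editor's note — illustrative sketch, not part of the patch. The datetime64 constructor overloads pick the `.item()` type from the unit; the assumed mapping is date units -> dt.date, native time units -> dt.datetime, nanoseconds and finer -> int:]

    import datetime as dt
    import numpy as np

    d = np.datetime64("2024-01-01")          # datetime64[dt.date]
    assert d.item() == dt.date(2024, 1, 1)
    t = np.datetime64("2024-01-01T12:30")    # datetime64[dt.datetime]
    assert t.item() == dt.datetime(2024, 1, 1, 12, 30)
    n = np.datetime64("2024-01-01", "ns")    # datetime64[int]
    assert isinstance(n.item(), int)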
+ def __add__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ...
@overload
- def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ...
+ def __add__(self: datetime64[int | dt.datetime], x: timedelta64[Never], /) -> datetime64: ...
@overload
def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ...
@overload
@@ -4668,15 +5663,20 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
@overload
def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ...
@overload
- def __add__(self, x: datetime64[None], /) -> datetime64[None]: ...
+ def __add__(self, x: timedelta64[None], /) -> datetime64[None]: ...
@overload
def __add__(self, x: _TD64Like_co, /) -> datetime64: ...
__radd__ = __add__

+ #
+ @overload
+ def __sub__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ...
@overload
- def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
+ def __sub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ...
@overload
- def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ...
+ def __sub__(self, x: _IntLike_co, /) -> Self: ...
+ @overload
+ def __sub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ...
@overload
def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ...
@overload
@@ -4706,10 +5706,13 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
@overload
def __sub__(self, x: datetime64, /) -> timedelta64: ...

+ # NOTE: mypy gets confused by the non-commutativity of subtraction here
+ @overload
+ def __rsub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ...
@overload
- def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
+ def __rsub__(self, x: _IntLike_co, /) -> Self: ...
@overload
- def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ...
+ def __rsub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ...
@overload
def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ...
@overload
@@ -4723,69 +5726,107 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
@overload
def __rsub__(self, x: datetime64, /) -> timedelta64: ...

- __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co]
- __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co]
- __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co]
- __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co]
+ #
+ @overload
+ def __lt__(self, other: datetime64, /) -> bool_: ...
+ @overload
+ def __lt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ...
+ @overload
+ def __lt__(self, other: _SupportsGT, /) -> bool_: ...
+
+ @overload
+ def __le__(self, other: datetime64, /) -> bool_: ...
+ @overload
+ def __le__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ...
+ @overload
+ def __le__(self, other: _SupportsGE, /) -> bool_: ...
+
+ @overload
+ def __gt__(self, other: datetime64, /) -> bool_: ...
+ @overload
+ def __gt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ...
+ @overload
+ def __gt__(self, other: _SupportsLT, /) -> bool_: ...
+
+ @overload
+ def __ge__(self, other: datetime64, /) -> bool_: ...
+ @overload
+ def __ge__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ...
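[Editor's note — illustrative sketch, not part of the patch. The `__add__`/`__sub__` overloads above cover unit promotion and datetime-datetime differences:]

    import numpy as np

    delta = np.datetime64("2025-01-02") - np.datetime64("2025-01-01")
    assert delta == np.timedelta64(1, "D")          # dt64 - dt64 -> timedelta64
    later = np.datetime64("2025-01-01") + np.timedelta64(12, "h")
    assert later == np.datetime64("2025-01-01T12")  # day unit promotes to hours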
+ @overload
+ def __ge__(self, other: _SupportsLE, /) -> bool_: ...

-class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ...
+@final # cannot be subclassed at runtime
+class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... # type: ignore[misc]

-class void(flexible[bytes | tuple[Any, ...]]):
+class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc]
@overload
- def __init__(self, value: _IntLike_co | bytes, /, dtype: None = None) -> None: ...
+ def __new__(cls, length_or_data: _IntLike_co | bytes, /, dtype: None = None) -> Self: ...
@overload
- def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ...
+ def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ...
+
+ #
+ @overload
+ def __getitem__(self, key: tuple[()], /) -> Self: ...
+ @overload
+ def __getitem__(
+ self, key: EllipsisType | tuple[EllipsisType], /
+ ) -> ndarray[tuple[()], dtype[Self]]: ...
+ @overload
+ def __getitem__(
+ self, key: None | tuple[None], /
+ ) -> ndarray[tuple[int], dtype[Self]]: ...
+ @overload
+ def __getitem__(
+ self, key: tuple[None, None], /
+ ) -> ndarray[tuple[int, int], dtype[Self]]: ...
+ @overload
+ def __getitem__(
+ self, key: tuple[None, None, None], /
+ ) -> ndarray[tuple[int, int, int], dtype[Self]]: ...
+ @overload # Limited support for (None,) * N > 3
+ def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ...
@overload
def __getitem__(self, key: str | SupportsIndex, /) -> Any: ...
@overload
def __getitem__(self, key: list[str], /) -> void: ...
+
+ #
def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ...

def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ...

-class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]):
+class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # type: ignore[misc]
@abstractmethod
- def __init__(self, value: _CharacterItemT_co = ..., /) -> None: ...
+ def __new__(cls, value: object = ..., /) -> Self: ...

# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart

-class bytes_(character[bytes], bytes):
- @overload
- def __new__(cls, o: object = ..., /) -> Self: ...
- @overload
- def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ...
-
- #
+class bytes_(character[bytes], bytes): # type: ignore[misc]
@overload
- def __init__(self, o: object = ..., /) -> None: ...
+ def __new__(cls, value: object = b"", /) -> Self: ...
@overload
- def __init__(self, s: str, /, encoding: str, errors: str = ...) -> None: ...
+ def __new__(cls, value: str, /, encoding: str, errors: str = "strict") -> Self: ...

#
def __bytes__(self, /) -> bytes: ...

-class str_(character[str], str):
- @overload
- def __new__(cls, value: object = ..., /) -> Self: ...
- @overload
- def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ...
-
- #
+class str_(character[str], str): # type: ignore[misc]
@overload
- def __init__(self, value: object = ..., /) -> None: ...
+ def __new__(cls, value: object = "", /) -> Self: ...
@overload
- def __init__(self, value: bytes, /, encoding: str = ..., errors: str = ...) -> None: ...
+ def __new__(cls, value: bytes, /, encoding: str, errors: str = "strict") -> Self: ...

# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs
@final
class ufunc:
+ __signature__: Final[inspect.Signature]
+
@property
def __name__(self) -> LiteralString: ...
@property
- def __qualname__(self) -> LiteralString: ...
+ def __qualname__(self) -> LiteralString: ... # pyright: ignore[reportIncompatibleVariableOverride]
@property
- def __doc__(self) -> str: ...
+ def __doc__(self) -> str: ... # type: ignore[override]
@property
def nin(self) -> int: ...
@property
@@ -4812,18 +5853,43 @@ class ufunc:
@property
def signature(self) -> LiteralString | None: ...

- def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
+ def __call__(self, /, *args: Any, **kwargs: Any) -> Any: ...
+
# The next four methods will always exist, but they will just
# raise a ValueError for ufuncs that don't accept two input
# arguments and return one output argument. Because of that we
# can't type them very precisely.
- def reduce(self, /, *args: Any, **kwargs: Any) -> Any: ...
- def accumulate(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
- def reduceat(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
- def outer(self, *args: Any, **kwargs: Any) -> Any: ...
- # Similarly at won't be defined for ufuncs that return multiple
+ def accumulate(
+ self,
+ array: ArrayLike,
+ /,
+ axis: SupportsIndex = 0,
+ dtype: DTypeLike | None = None,
+ out: ndarray | EllipsisType | None = None,
+ ) -> NDArray[Incomplete]: ...
+ def reduce(
+ self,
+ array: ArrayLike,
+ /,
+ axis: _ShapeLike | None = 0,
+ dtype: DTypeLike | None = None,
+ out: ndarray | EllipsisType | None = None,
+ **kwargs: Incomplete,
+ ) -> Incomplete: ...
+ def reduceat(
+ self,
+ array: ArrayLike,
+ /,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex = 0,
+ dtype: DTypeLike | None = None,
+ out: ndarray | EllipsisType | None = None,
+ ) -> NDArray[Incomplete]: ...
+ def outer(self, A: ArrayLike, B: ArrayLike, /, **kwargs: Incomplete) -> NDArray[Incomplete]: ...
+
+ # Similarly `at` won't be defined for ufuncs that return multiple
# outputs, so we can't type it very precisely.
- def at(self, /, *args: Any, **kwargs: Any) -> None: ...
+ def at(self, a: ndarray, indices: _ArrayLikeInt_co, b: ArrayLike | None = None, /) -> None: ...

#
def resolve_dtypes(
@@ -4833,104 +5899,100 @@ class ufunc:
*,
signature: tuple[dtype | None, ...] | None = None,
casting: _CastingKind | None = None,
- reduction: builtins.bool = False,
+ reduction: py_bool = False,
) -> tuple[dtype, ...]: ...
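[Editor's note — illustrative sketch, not part of the patch. The now-explicit `reduce`/`accumulate`/`reduceat`/`outer`/`at` signatures describe the familiar runtime behaviour of binary ufuncs:]

    import numpy as np

    a = np.arange(1, 5)  # [1 2 3 4]
    assert np.add.reduce(a) == 10
    assert np.multiply.accumulate(a).tolist() == [1, 2, 6, 24]
    assert np.add.reduceat(a, [0, 2]).tolist() == [3, 7]
    assert np.multiply.outer([1, 2], [3, 4]).tolist() == [[3, 4], [6, 8]]
    b = np.zeros(3)
    np.add.at(b, [0, 0, 2], 1.0)  # unbuffered in-place add
    assert b.tolist() == [2.0, 0.0, 1.0]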
# Parameters: `__name__`, `ntypes` and `identity` -absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] -add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] -arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None] -arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None] -arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None] -arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None] -arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None] -arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] -arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] -bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] -bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] -bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] -bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] -bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] -cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] -ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] -conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] -conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] -copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] -cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] -cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] -deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] -degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] -divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] -divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] -equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] -exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] -exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None] -expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None] -fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None] -float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None] -floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None] -floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None] -fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None] -fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None] -fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None] -frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None] -gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]] -greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None] -greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None] -heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None] -hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]] -invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None] -isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None] -isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None] -isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None] -isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None] -lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None] -ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None] -left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None] -less: _UFunc_Nin2_Nout1[L['less'], L[23], None] -less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None] -log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None] -log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None] -log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None] -log: _UFunc_Nin1_Nout1[L['log'], L[10], None] -logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float] -logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float] -logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] -logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] -logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] -logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] -matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] -matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, 
L["(m,n),(n)->(m)"]] -maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] -minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] -mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] -modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] -multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] -negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] -nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None] -not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None] -positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None] -power: _UFunc_Nin2_Nout1[L['power'], L[18], None] -rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None] -radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None] -reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None] -remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] -right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None] -rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None] -sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None] -signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None] -sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None] -sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None] -spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None] -sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None] -square: _UFunc_Nin1_Nout1[L['square'], L[18], None] -subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] -tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] -tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] -true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] -trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] -vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] -vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] +absolute: _UFunc_Nin1_Nout1[L["absolute"], L[20], None] +add: _UFunc_Nin2_Nout1[L["add"], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L["arccos"], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L["arccosh"], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L["arcsin"], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L["arcsinh"], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L["arctan2"], L[5], None] +arctan: _UFunc_Nin1_Nout1[L["arctan"], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L["arctanh"], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L["bitwise_and"], L[12], L[-1]] +bitwise_count: _UFunc_Nin1_Nout1[L["bitwise_count"], L[11], None] +bitwise_or: _UFunc_Nin2_Nout1[L["bitwise_or"], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L["bitwise_xor"], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L["cbrt"], L[5], None] +ceil: _UFunc_Nin1_Nout1[L["ceil"], L[7], None] +conjugate: _UFunc_Nin1_Nout1[L["conjugate"], L[18], None] +copysign: _UFunc_Nin2_Nout1[L["copysign"], L[4], None] +cos: _UFunc_Nin1_Nout1[L["cos"], L[9], None] +cosh: _UFunc_Nin1_Nout1[L["cosh"], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L["deg2rad"], L[5], None] +degrees: _UFunc_Nin1_Nout1[L["degrees"], L[5], None] +divide: _UFunc_Nin2_Nout1[L["divide"], L[11], None] +divmod: _UFunc_Nin2_Nout2[L["divmod"], L[15], None] +equal: _UFunc_Nin2_Nout1[L["equal"], L[23], None] +exp2: _UFunc_Nin1_Nout1[L["exp2"], L[8], None] +exp: _UFunc_Nin1_Nout1[L["exp"], L[10], None] +expm1: _UFunc_Nin1_Nout1[L["expm1"], L[8], None] +fabs: _UFunc_Nin1_Nout1[L["fabs"], L[5], None] +float_power: _UFunc_Nin2_Nout1[L["float_power"], L[4], None] +floor: _UFunc_Nin1_Nout1[L["floor"], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L["floor_divide"], L[21], None] +fmax: _UFunc_Nin2_Nout1[L["fmax"], L[21], None] +fmin: _UFunc_Nin2_Nout1[L["fmin"], L[21], None] +fmod: _UFunc_Nin2_Nout1[L["fmod"], L[15], None] +frexp: _UFunc_Nin1_Nout2[L["frexp"], L[4], None] +gcd: 
_UFunc_Nin2_Nout1[L["gcd"], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L["greater"], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L["greater_equal"], L[23], None] +heaviside: _UFunc_Nin2_Nout1[L["heaviside"], L[4], None] +hypot: _UFunc_Nin2_Nout1[L["hypot"], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L["invert"], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L["isfinite"], L[20], None] +isinf: _UFunc_Nin1_Nout1[L["isinf"], L[20], None] +isnan: _UFunc_Nin1_Nout1[L["isnan"], L[20], None] +isnat: _UFunc_Nin1_Nout1[L["isnat"], L[2], None] +lcm: _UFunc_Nin2_Nout1[L["lcm"], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L["ldexp"], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L["left_shift"], L[11], None] +less: _UFunc_Nin2_Nout1[L["less"], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L["less_equal"], L[23], None] +log10: _UFunc_Nin1_Nout1[L["log10"], L[8], None] +log1p: _UFunc_Nin1_Nout1[L["log1p"], L[8], None] +log2: _UFunc_Nin1_Nout1[L["log2"], L[8], None] +log: _UFunc_Nin1_Nout1[L["log"], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L["logaddexp2"], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L["logaddexp"], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L["logical_and"], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L["logical_not"], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L["logical_or"], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L["logical_xor"], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L["matmul"], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L["matvec"], L[19], None, L["(m,n),(n)->(m)"]] +maximum: _UFunc_Nin2_Nout1[L["maximum"], L[21], None] +minimum: _UFunc_Nin2_Nout1[L["minimum"], L[21], None] +modf: _UFunc_Nin1_Nout2[L["modf"], L[4], None] +multiply: _UFunc_Nin2_Nout1[L["multiply"], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L["negative"], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L["nextafter"], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L["not_equal"], L[23], None] +positive: _UFunc_Nin1_Nout1[L["positive"], L[19], None] +power: _UFunc_Nin2_Nout1[L["power"], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L["rad2deg"], L[5], None] +radians: _UFunc_Nin1_Nout1[L["radians"], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L["reciprocal"], L[18], None] +remainder: _UFunc_Nin2_Nout1[L["remainder"], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L["right_shift"], L[11], None] +rint: _UFunc_Nin1_Nout1[L["rint"], L[10], None] +sign: _UFunc_Nin1_Nout1[L["sign"], L[19], None] +signbit: _UFunc_Nin1_Nout1[L["signbit"], L[4], None] +sin: _UFunc_Nin1_Nout1[L["sin"], L[9], None] +sinh: _UFunc_Nin1_Nout1[L["sinh"], L[8], None] +spacing: _UFunc_Nin1_Nout1[L["spacing"], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L["sqrt"], L[10], None] +square: _UFunc_Nin1_Nout1[L["square"], L[18], None] +subtract: _UFunc_Nin2_Nout1[L["subtract"], L[21], None] +tan: _UFunc_Nin1_Nout1[L["tan"], L[8], None] +tanh: _UFunc_Nin1_Nout1[L["tanh"], L[8], None] +trunc: _UFunc_Nin1_Nout1[L["trunc"], L[7], None] +vecdot: _GUFunc_Nin2_Nout1[L["vecdot"], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L["vecmat"], L[19], None, L["(n),(n,m)->(m)"]] abs = absolute acos = arccos @@ -4942,31 +6004,14 @@ atanh = arctanh atan2 = arctan2 concat = concatenate bitwise_left_shift = left_shift +bitwise_not = invert bitwise_invert = invert bitwise_right_shift = right_shift +conj = conjugate +mod = remainder permute_dims = transpose pow = power - -class errstate: - def __init__( - self, - *, - call: _ErrCall = ..., - all: _ErrKind | None = ..., - divide: _ErrKind | None = ..., - over: _ErrKind | None = ..., - under: _ErrKind | None 
= ..., - invalid: _ErrKind | None = ..., - ) -> None: ... - def __enter__(self) -> None: ... - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - traceback: TracebackType | None, - /, - ) -> None: ... - def __call__(self, func: _CallableT) -> _CallableT: ... +true_divide = divide # TODO: The type of each `__next__` and `iters` return-type depends # on the length and dtype of `args`; we can't describe this behavior yet @@ -4994,77 +6039,48 @@ class broadcast: @final class busdaycalendar: - def __new__( - cls, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - ) -> busdaycalendar: ... - @property - def weekmask(self) -> NDArray[np.bool]: ... - @property - def holidays(self) -> NDArray[datetime64]: ... - -class finfo(Generic[_FloatingT_co]): - dtype: Final[dtype[_FloatingT_co]] - bits: Final[int] - eps: Final[_FloatingT_co] - epsneg: Final[_FloatingT_co] - iexp: Final[int] - machep: Final[int] - max: Final[_FloatingT_co] - maxexp: Final[int] - min: Final[_FloatingT_co] - minexp: Final[int] - negep: Final[int] - nexp: Final[int] - nmant: Final[int] - precision: Final[int] - resolution: Final[_FloatingT_co] - smallest_subnormal: Final[_FloatingT_co] - @property - def smallest_normal(self) -> _FloatingT_co: ... - @property - def tiny(self) -> _FloatingT_co: ... - @overload - def __new__(cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]]) -> finfo[floating[_NBit1]]: ... - @overload - def __new__(cls, dtype: complex | type[complex]) -> finfo[float64]: ... - @overload - def __new__(cls, dtype: str) -> finfo[floating]: ... - -class iinfo(Generic[_IntegerT_co]): - dtype: Final[dtype[_IntegerT_co]] - kind: Final[LiteralString] - bits: Final[int] - key: Final[LiteralString] + def __init__( + self, + /, + weekmask: str | Sequence[int | bool_ | integer] | _SupportsArray[dtype[bool_ | integer]] = "1111100", + holidays: Sequence[dt.date | datetime64] | _SupportsArray[dtype[datetime64]] | None = None, + ) -> None: ... @property - def min(self) -> int: ... + def weekmask(self) -> ndarray[tuple[int], dtype[bool_]]: ... @property - def max(self) -> int: ... + def holidays(self) -> ndarray[tuple[int], dtype[datetime64[dt.date]]]: ... +@final +class nditer: @overload - def __new__( - cls, dtype: _IntegerT_co | _DTypeLike[_IntegerT_co] - ) -> iinfo[_IntegerT_co]: ... - @overload - def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... + def __init__( + self, + /, + op: ArrayLike, + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[_NDIterFlagsOp] | None = None, + op_dtypes: DTypeLike | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[SupportsIndex] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... @overload - def __new__(cls, dtype: str) -> iinfo[Any]: ... + def __init__( + self, + /, + op: Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, + op_dtypes: Sequence[DTypeLike | None] | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[Sequence[SupportsIndex]] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... 
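[Editor's note — illustrative sketch, not part of the patch. The rewritten `busdaycalendar.__init__` and the two `nditer.__init__` overloads (single operand vs. operand sequence) in use:]

    import datetime as dt
    import numpy as np

    cal = np.busdaycalendar(weekmask="1111100", holidays=[dt.date(2025, 1, 1)])
    assert not np.is_busday("2025-01-01", busdaycal=cal)  # a Wednesday, but a holiday

    a = np.arange(6).reshape(2, 3)
    with np.nditer(a, flags=["multi_index"]) as it:       # single-operand overload
        assert sum(int(x) for x in it) == 15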
-@final
-class nditer:
- def __new__(
- cls,
- op: ArrayLike | Sequence[ArrayLike | None],
- flags: Sequence[_NDIterFlagsKind] | None = ...,
- op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ...,
- op_dtypes: DTypeLike | Sequence[DTypeLike] = ...,
- order: _OrderKACF = ...,
- casting: _CastingKind = ...,
- op_axes: Sequence[Sequence[SupportsIndex]] | None = ...,
- itershape: _ShapeLike | None = ...,
- buffersize: SupportsIndex = ...,
- ) -> nditer: ...
def __enter__(self) -> nditer: ...
def __exit__(
self,
@@ -5085,24 +6101,24 @@ class nditer:
def copy(self) -> nditer: ...
def debug_print(self) -> None: ...
def enable_external_loop(self) -> None: ...
- def iternext(self) -> builtins.bool: ...
+ def iternext(self) -> py_bool: ...
def remove_axis(self, i: SupportsIndex, /) -> None: ...
def remove_multi_index(self) -> None: ...
def reset(self) -> None: ...
@property
def dtypes(self) -> tuple[dtype, ...]: ...
@property
- def finished(self) -> builtins.bool: ...
+ def finished(self) -> py_bool: ...
@property
- def has_delayed_bufalloc(self) -> builtins.bool: ...
+ def has_delayed_bufalloc(self) -> py_bool: ...
@property
- def has_index(self) -> builtins.bool: ...
+ def has_index(self) -> py_bool: ...
@property
- def has_multi_index(self) -> builtins.bool: ...
+ def has_multi_index(self) -> py_bool: ...
@property
def index(self) -> int: ...
@property
- def iterationneedsapi(self) -> builtins.bool: ...
+ def iterationneedsapi(self) -> py_bool: ...
@property
def iterindex(self) -> int: ...
@property
@@ -5125,7 +6141,7 @@ class nditer:
def value(self) -> tuple[NDArray[Any], ...]: ...

class memmap(ndarray[_ShapeT_co, _DTypeT_co]):
- __array_priority__: ClassVar[float]
+ __array_priority__: ClassVar[float] = -100.0 # pyright: ignore[reportIncompatibleMethodOverride]
filename: str | None
offset: int
mode: str
@@ -5134,60 +6150,40 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]):
subtype,
filename: StrOrBytesPath | _SupportsFileMethodsRW,
dtype: type[uint8] = ...,
- mode: _MemMapModeKind = ...,
- offset: int = ...,
- shape: int | tuple[int, ...] | None = ...,
- order: _OrderKACF = ...,
+ mode: _MemMapModeKind = "r+",
+ offset: int = 0,
+ shape: int | tuple[int, ...] | None = None,
+ order: _OrderKACF = "C",
) -> memmap[Any, dtype[uint8]]: ...
@overload
- def __new__(
+ def __new__[ScalarT: generic](
subtype,
filename: StrOrBytesPath | _SupportsFileMethodsRW,
- dtype: _DTypeLike[_ScalarT],
- mode: _MemMapModeKind = ...,
- offset: int = ...,
- shape: int | tuple[int, ...] | None = ...,
- order: _OrderKACF = ...,
- ) -> memmap[Any, dtype[_ScalarT]]: ...
+ dtype: _DTypeLike[ScalarT],
+ mode: _MemMapModeKind = "r+",
+ offset: int = 0,
+ shape: int | tuple[int, ...] | None = None,
+ order: _OrderKACF = "C",
+ ) -> memmap[Any, dtype[ScalarT]]: ...
@overload
def __new__(
subtype,
filename: StrOrBytesPath | _SupportsFileMethodsRW,
dtype: DTypeLike,
- mode: _MemMapModeKind = ...,
- offset: int = ...,
- shape: int | tuple[int, ...] | None = ...,
- order: _OrderKACF = ...,
+ mode: _MemMapModeKind = "r+",
+ offset: int = 0,
+ shape: int | tuple[int, ...] | None = None,
+ order: _OrderKACF = "C",
) -> memmap[Any, dtype]: ...
def __array_finalize__(self, obj: object) -> None: ...
- def __array_wrap__(
+ def __array_wrap__( # type: ignore[override]
self,
array: memmap[_ShapeT_co, _DTypeT_co],
- context: tuple[ufunc, tuple[Any, ...], int] | None = ...,
- return_scalar: builtins.bool = ...,
+ context: tuple[ufunc, tuple[Any, ...], int] | None = None,
+ return_scalar: py_bool = False,
) -> Any: ...
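[Editor's note — illustrative sketch, not part of the patch. The defaulted `memmap.__new__` parameters spelled out above correspond to the usual create-write-flush cycle:]

    import tempfile
    import numpy as np

    with tempfile.TemporaryDirectory() as tmp:
        mm = np.memmap(f"{tmp}/buf.dat", dtype=np.float64, mode="w+", shape=(3, 4))
        mm[:] = 1.0
        mm.flush()              # persist the pages to disk
        assert mm.sum() == 12.0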
def flush(self) -> None: ... -# TODO: Add a mypy plugin for managing functions whose output type is dependent -# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`) -class vectorize: - pyfunc: Callable[..., Any] - cache: builtins.bool - signature: LiteralString | None - otypes: LiteralString | None - excluded: set[int | str] - __doc__: str | None - def __init__( - self, - pyfunc: Callable[..., Any], - otypes: str | Iterable[DTypeLike] | None = ..., - doc: str | None = ..., - excluded: Iterable[int | str] | None = ..., - cache: builtins.bool = ..., - signature: str | None = ..., - ) -> None: ... - def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - class poly1d: @property def variable(self) -> LiteralString: ... @@ -5223,9 +6219,9 @@ class poly1d: __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @overload - def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ... + def __array__(self, /, t: None = None, copy: py_bool | None = None) -> ndarray[tuple[int]]: ... @overload - def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ... + def __array__[DTypeT: dtype](self, /, t: DTypeT, copy: py_bool | None = None) -> ndarray[tuple[int], DTypeT]: ... @overload def __call__(self, val: _ScalarLike_co) -> Any: ... @@ -5237,8 +6233,8 @@ class poly1d: def __init__( self, c_or_r: ArrayLike, - r: builtins.bool = ..., - variable: str | None = ..., + r: py_bool = False, + variable: str | None = None, ) -> None: ... def __len__(self) -> int: ... def __neg__(self) -> poly1d: ... @@ -5255,203 +6251,17 @@ class poly1d: def __getitem__(self, val: int, /) -> Any: ... def __setitem__(self, key: int, val: Any, /) -> None: ... def __iter__(self) -> Iterator[Any]: ... - def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... + def deriv(self, m: SupportsInt | SupportsIndex = 1) -> poly1d: ... def integ( self, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0, ) -> poly1d: ... -class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): - __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] - - def __new__( - subtype, # pyright: ignore[reportSelfClsParameterName] - data: ArrayLike, - dtype: DTypeLike = ..., - copy: builtins.bool = ..., - ) -> matrix[_2D, Incomplete]: ... - def __array_finalize__(self, obj: object) -> None: ... - - @overload # type: ignore[override] - def __getitem__( - self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], / - ) -> Incomplete: ... - @overload - def __getitem__(self, key: _ToIndices, /) -> matrix[_2D, _DTypeT_co]: ... - @overload - def __getitem__(self: matrix[Any, dtype[void]], key: str, /) -> matrix[_2D, dtype]: ... - @overload - def __getitem__(self: matrix[Any, dtype[void]], key: list[str], /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # - def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __imul__(self, other: ArrayLike, /) -> Self: ... 
- - # - def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __ipow__(self, other: ArrayLike, /) -> Self: ... # type: ignore[misc, override] - - # keep in sync with `prod` and `mean` - @overload # type: ignore[override] - def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... - @overload - def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... - @overload - def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `sum` and `mean` - @overload # type: ignore[override] - def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... - @overload - def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... - @overload - def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `sum` and `prod` - @overload # type: ignore[override] - def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... - @overload - def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... - @overload - def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `var` - @overload # type: ignore[override] - def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... - @overload - def std( - self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 - ) -> matrix[_2D, Incomplete]: ... - @overload - def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... - @overload - def std( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... - - # keep in sync with `std` - @overload # type: ignore[override] - def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... - @overload - def var( - self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 - ) -> matrix[_2D, Incomplete]: ... - @overload - def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... - @overload - def var( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... 
- - # keep in sync with `all` - @overload # type: ignore[override] - def any(self, axis: None = None, out: None = None) -> np.bool: ... - @overload - def any(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... - @overload - def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `any` - @overload # type: ignore[override] - def all(self, axis: None = None, out: None = None) -> np.bool: ... - @overload - def all(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... - @overload - def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `min` and `ptp` - @overload # type: ignore[override] - def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... - @overload - def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... - @overload - def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `max` and `ptp` - @overload # type: ignore[override] - def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... - @overload - def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... - @overload - def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `max` and `min` - @overload - def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... - @overload - def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... - @overload - def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `argmin` - @overload # type: ignore[override] - def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ... - @overload - def argmax(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ... - @overload - def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... - @overload - def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `argmax` - @overload # type: ignore[override] - def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ... - @overload - def argmin(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ... - @overload - def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... - @overload - def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] - - #the second overload handles the (rare) case that the matrix is not 2-d - @overload - def tolist(self: matrix[_2D, dtype[generic[_T]]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] - @overload - def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # these three methods will at least return a `2-d` array of shape (1, n) - def squeeze(self, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... - def ravel(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def flatten(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - - # matrix.T is inherited from _ScalarOrArrayCommon - def getT(self) -> Self: ... - @property - def I(self) -> matrix[_2D, Incomplete]: ... # noqa: E743 - def getI(self) -> matrix[_2D, Incomplete]: ... - @property - def A(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ... - def getA(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ... - @property - def A1(self) -> ndarray[_AnyShape, _DTypeT_co]: ... - def getA1(self) -> ndarray[_AnyShape, _DTypeT_co]: ... - @property - def H(self) -> matrix[_2D, _DTypeT_co]: ... - def getH(self) -> matrix[_2D, _DTypeT_co]: ... - def from_dlpack( x: _SupportsDLPack[None], /, *, device: L["cpu"] | None = None, - copy: builtins.bool | None = None, -) -> NDArray[number | np.bool]: ... + copy: py_bool | None = None, +) -> NDArray[number | bool_]: ... diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 6ea9e13587f4..067e38798718 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -94,14 +94,14 @@ def capabilities(self): >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, - 'data-dependent shapes': True} + 'data-dependent shapes': True, + 'max dimensions': 64} """ return { "boolean indexing": True, "data-dependent shapes": True, - # 'max rank' will be part of the 2024.12 standard - # "max rank": 64, + "max dimensions": 64, } def default_device(self): diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index ee9f8a5660c3..0bad9c65b137 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -1,19 +1,9 @@ -from typing import ( - ClassVar, - Literal, - Never, - TypeAlias, - TypedDict, - TypeVar, - final, - overload, - type_check_only, -) +from typing import Literal, Never, TypedDict, final, overload, type_check_only import numpy as np -_Device: TypeAlias = Literal["cpu"] -_DeviceLike: TypeAlias = _Device | None +type _Device = Literal["cpu"] +type _DeviceLike = _Device | None _Capabilities = TypedDict( "_Capabilities", @@ -33,33 +23,22 @@ _DefaultDTypes = TypedDict( }, ) -_KindBool: TypeAlias = Literal["bool"] -_KindInt: TypeAlias = Literal["signed integer"] -_KindUInt: TypeAlias = Literal["unsigned integer"] -_KindInteger: TypeAlias = Literal["integral"] -_KindFloat: TypeAlias = Literal["real floating"] -_KindComplex: TypeAlias = Literal["complex floating"] -_KindNumber: TypeAlias = Literal["numeric"] -_Kind: TypeAlias = ( - _KindBool - | _KindInt - | _KindUInt - | _KindInteger - | _KindFloat - | _KindComplex - | _KindNumber -) - -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") -_T3 = TypeVar("_T3") -_Permute1: TypeAlias = _T1 | tuple[_T1] -_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1] -_Permute3: TypeAlias = ( - tuple[_T1, _T2, 
_T3] | tuple[_T1, _T3, _T2] - | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1] - | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] -) +type _KindBool = Literal["bool"] +type _KindInt = Literal["signed integer"] +type _KindUInt = Literal["unsigned integer"] +type _KindInteger = Literal["integral"] +type _KindFloat = Literal["real floating"] +type _KindComplex = Literal["complex floating"] +type _KindNumber = Literal["numeric"] +type _Kind = _KindBool | _KindInt | _KindUInt | _KindInteger | _KindFloat | _KindComplex | _KindNumber + +type _Permute1[T1] = T1 | tuple[T1] +type _Permute2[T1, T2] = tuple[T1, T2] | tuple[T2, T1] +type _Permute3[T1, T2, T3] = ( + tuple[T1, T2, T3] | tuple[T1, T3, T2] + | tuple[T2, T1, T3] | tuple[T2, T3, T1] + | tuple[T3, T1, T2] | tuple[T3, T2, T1] +) # fmt: skip @type_check_only class _DTypesBool(TypedDict): @@ -114,94 +93,84 @@ class _DTypesUnion(TypedDict, total=False): complex64: np.dtype[np.complex64] complex128: np.dtype[np.complex128] -_EmptyDict: TypeAlias = dict[Never, Never] +type _EmptyDict = dict[Never, Never] @final class __array_namespace_info__: - __module__: ClassVar[Literal['numpy']] + __module__: Literal["numpy"] = "numpy" def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... - def default_dtypes( - self, - *, - device: _DeviceLike = ..., - ) -> _DefaultDTypes: ... + def default_dtypes(self, *, device: _DeviceLike = None) -> _DefaultDTypes: ... def devices(self) -> list[_Device]: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., - kind: None = ..., + device: _DeviceLike = None, + kind: None = None, ) -> _DTypes: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindBool], ) -> _DTypesBool: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindInt], ) -> _DTypesInt: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindUInt], ) -> _DTypesUInt: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindFloat], ) -> _DTypesFloat: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindComplex], ) -> _DTypesComplex: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., - kind: ( - _Permute1[_KindInteger] - | _Permute2[_KindInt, _KindUInt] - ), + device: _DeviceLike = None, + kind: _Permute1[_KindInteger] | _Permute2[_KindInt, _KindUInt], ) -> _DTypesInteger: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., - kind: ( - _Permute1[_KindNumber] - | _Permute3[_KindInteger, _KindFloat, _KindComplex] - ), + device: _DeviceLike = None, + kind: _Permute1[_KindNumber] | _Permute3[_KindInteger, _KindFloat, _KindComplex], ) -> _DTypesNumber: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: tuple[()], ) -> _EmptyDict: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: tuple[_Kind, ...], ) -> _DTypesUnion: ... diff --git a/numpy/_build_utils/__init__.py b/numpy/_build_utils/__init__.py deleted file mode 100644 index 10b282d8d9ee..000000000000 --- a/numpy/_build_utils/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Don't use the deprecated NumPy C API. 
Define this to a fixed version -# instead of NPY_API_VERSION in order not to break compilation for -# released SciPy versions when NumPy introduces a new deprecation. Use -# in setup.py:: -# -# config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api) -# -numpy_nodepr_api = { - "define_macros": [("NPY_NO_DEPRECATED_API", "NPY_1_9_API_VERSION")] -} - - -def import_file(folder, module_name): - """Import a file directly, avoiding importing scipy""" - import importlib - import pathlib - - fname = pathlib.Path(folder) / f'{module_name}.py' - spec = importlib.util.spec_from_file_location(module_name, str(fname)) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module diff --git a/numpy/distutils/conv_template.py b/numpy/_build_utils/conv_template.py similarity index 94% rename from numpy/distutils/conv_template.py rename to numpy/_build_utils/conv_template.py index c8933d1d4286..fb57abdf1587 100644 --- a/numpy/distutils/conv_template.py +++ b/numpy/_build_utils/conv_template.py @@ -82,8 +82,8 @@ __all__ = ['process_str', 'process_file'] import os -import sys import re +import sys # names for replacement that are already global. global_names = {} @@ -106,10 +106,10 @@ def parse_structure(astr, level): at zero. Returns an empty list if no loops found. """ - if level == 0 : + if level == 0: loopbeg = "/**begin repeat" loopend = "/**end repeat**/" - else : + else: loopbeg = "/**begin repeat%d" % level loopend = "/**end repeat%d**/" % level @@ -124,9 +124,9 @@ def parse_structure(astr, level): start2 = astr.find("\n", start2) fini1 = astr.find(loopend, start2) fini2 = astr.find("\n", fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) + line += astr.count("\n", ind, start2 + 1) + spanlist.append((start, start2 + 1, fini1, fini2 + 1, line)) + line += astr.count("\n", start2 + 1, fini2) ind = fini2 spanlist.sort() return spanlist @@ -135,10 +135,13 @@ def parse_structure(astr, level): def paren_repl(obj): torep = obj.group(1) numrep = obj.group(2) - return ','.join([torep]*int(numrep)) + return ','.join([torep] * int(numrep)) + parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") plainrep = re.compile(r"([^*]+)\*(\d+)") + + def parse_values(astr): # replaces all occurrences of '(a,b,c)*4' in astr # with 'a,b,c,a,b,c,a,b,c,a,b,c'. 
Empty braces generate @@ -155,7 +158,7 @@ def parse_values(astr): named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") exclude_vars_re = re.compile(r"(\w*)=(\w*)") exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : +def parse_loop_header(loophead): """Find all named replacements in the header Returns a list of dictionaries, one for each loop iteration, @@ -179,14 +182,13 @@ def parse_loop_header(loophead) : name = rep[0] vals = parse_values(rep[1]) size = len(vals) - if nsub is None : + if nsub is None: nsub = size - elif nsub != size : + elif nsub != size: msg = "Mismatch in number of values, %d != %d\n%s = %s" raise ValueError(msg % (nsub, size, name, vals)) names.append((name, vals)) - # Find any exclude variables excludes = [] @@ -200,30 +202,33 @@ def parse_loop_header(loophead) : # generate list of dictionaries, one for each template iteration dlist = [] - if nsub is None : + if nsub is None: raise ValueError("No substitution variables found") for i in range(nsub): tmp = {name: vals[i] for name, vals in names} dlist.append(tmp) return dlist + replace_re = re.compile(r"@(\w+)@") -def parse_string(astr, env, level, line) : + + +def parse_string(astr, env, level, line): lineno = "#line %d\n" % line # local function for string replacement, uses env def replace(match): name = match.group(1) - try : + try: val = env[name] except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) + msg = f'line {line}: no definition of key "{name}"' raise ValueError(msg) from None return val code = [lineno] struct = parse_structure(astr, level) - if struct : + if struct: # recurse over inner loops oldend = 0 newlevel = level + 1 @@ -234,18 +239,18 @@ def replace(match): oldend = sub[3] newline = line + sub[4] code.append(replace_re.sub(replace, pref)) - try : + try: envlist = parse_loop_header(head) except ValueError as e: msg = "line %d: %s" % (newline, e) raise ValueError(msg) - for newenv in envlist : + for newenv in envlist: newenv.update(env) newcode = parse_string(text, newenv, newlevel, newline) code.extend(newcode) suff = astr[oldend:] code.append(replace_re.sub(replace, suff)) - else : + else: # replace keys code.append(replace_re.sub(replace, astr)) code.append('\n') @@ -325,5 +330,6 @@ def main(): outfile.write(writestr) + if __name__ == "__main__": main() diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index 7975dd9dba65..47dd71d1567b 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -28,7 +28,7 @@ def git_version(version): git_hash = '' try: p = subprocess.Popen( - ['git', 'log', '-1', '--format="%H %aI"'], + ['git', '-c', 'log.showSignature=false', 'log', '-1', '--format="%H %aI"'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(__file__), diff --git a/numpy/_build_utils/process_src_template.py b/numpy/_build_utils/process_src_template.py index 8bd1ea872a42..f934c222e838 100644 --- a/numpy/_build_utils/process_src_template.py +++ b/numpy/_build_utils/process_src_template.py @@ -5,11 +5,11 @@ def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', 'distutils', 'conv_template.py' + 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index d0da7e0ad9ed..18b250f9972b 100644 --- 
a/numpy/_core/__init__.py
+++ b/numpy/_core/__init__.py
@@ -22,29 +22,64 @@
     from . import multiarray
 except ImportError as exc:
     import sys
-    msg = """
+
+    # Bypass for the module re-initialization opt-out
+    if exc.msg == "cannot load module more than once per process":
+        raise
+
+    # Basically always, the problem should be that the C module is wrong/missing...
+    if (
+        isinstance(exc, ModuleNotFoundError)
+        and exc.name == "numpy._core._multiarray_umath"
+    ):
+        import sys
+        candidates = []
+        for path in __path__:
+            candidates.extend(
+                f for f in os.listdir(path) if f.startswith("_multiarray_umath"))
+        if len(candidates) == 0:
+            bad_c_module_info = (
+                "We found no compiled module, did NumPy build successfully?\n")
+        else:
+            candidate_str = '\n * '.join(candidates)
+            # cache_tag is documented to be possibly None, so just use name if it is
+            # this guesses at cache_tag being the same as the extension module scheme
+            tag = sys.implementation.cache_tag or sys.implementation.name
+            bad_c_module_info = (
+                f"The following compiled module files exist, but seem incompatible\n"
+                f"with either python '{tag}' or the "
+                f"platform '{sys.platform}':\n\n * {candidate_str}\n"
+            )
+    else:
+        bad_c_module_info = ""
+
+    major, minor, *_ = sys.version_info
+    msg = f"""

 IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!

 Importing the numpy C-extensions failed. This error can happen for
 many reasons, often due to issues with your setup or how NumPy was
 installed.
-
+{bad_c_module_info}
 We have compiled some common reasons and troubleshooting tips at:

     https://numpy.org/devdocs/user/troubleshooting-importerror.html

 Please note and check the following:

-  * The Python version is: Python%d.%d from "%s"
-  * The NumPy version is: "%s"
+  * The Python version is: Python {major}.{minor} from "{sys.executable}"
+  * The NumPy version is: "{__version__}"

 and make sure that they are the versions you expect.
-Please carefully study the documentation linked above for further help.

-Original error was: %s
-""" % (sys.version_info[0], sys.version_info[1], sys.executable,
-       __version__, exc)
+Please carefully study the information and documentation linked above.
+This is unlikely to be a NumPy issue but will be caused by a bad install
+or environment on your machine.
+
+Original error was: {exc}
+"""
+
     raise ImportError(msg) from exc
 finally:
     for envkey in env_added:
@@ -71,15 +106,7 @@
 from .numerictypes import sctypeDict, sctypes

 multiarray.set_typeDict(nt.sctypeDict)
-from . import (
-    _machar,
-    einsumfunc,
-    fromnumeric,
-    function_base,
-    getlimits,
-    numeric,
-    shape_base,
-)
+from . import einsumfunc, fromnumeric, function_base, getlimits, numeric, shape_base
 from .einsumfunc import *
 from .fromnumeric import *
 from .function_base import *
@@ -160,18 +187,6 @@ def _DType_reduce(DType):
     return _DType_reconstruct, (scalar_type,)


-def __getattr__(name):
-    # Deprecated 2022-11-22, NumPy 1.25.
- if name == "MachAr": - import warnings - warnings.warn( - "The `np._core.MachAr` is considered private API (NumPy 1.24)", - DeprecationWarning, stacklevel=2, - ) - return _machar.MachAr - raise AttributeError(f"Module {__name__!r} has no attribute {name!r}") - - import copyreg copyreg.pickle(ufunc, _ufunc_reduce) diff --git a/numpy/_core/__init__.pyi b/numpy/_core/__init__.pyi index 40d9c411b97c..ce5427bbfcd9 100644 --- a/numpy/_core/__init__.pyi +++ b/numpy/_core/__init__.pyi @@ -1,2 +1,666 @@ -# NOTE: The `np._core` namespace is deliberately kept empty due to it -# being private +# keep in sync with https://github.com/numpy/numtype/blob/main/src/numpy-stubs/_core/__init__.pyi + +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .einsumfunc import einsum, einsum_path +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + transpose as permute_dims, + var, +) +from .function_base import geomspace, linspace, logspace +from .getlimits import finfo, iinfo +from .memmap import memmap +from .numeric import ( + False_, + True_, + allclose, + arange, + argwhere, + array, + array_equal, + array_equiv, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + astype, + base_repr, + binary_repr, + bitwise_not, + broadcast, + can_cast, + concatenate, + concatenate as concat, + convolve, + copyto, + correlate, + count_nonzero, + cross, + dot, + dtype, + empty, + empty_like, + flatiter, + flatnonzero, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + fromstring, + full, + full_like, + identity, + indices, + inf, + inner, + isclose, + isfortran, + isscalar, + lexsort, + little_endian, + matmul, + may_share_memory, + min_scalar_type, + moveaxis, + nan, + ndarray, + nditer, + nested_iters, + newaxis, + ones, + ones_like, + outer, + promote_types, + putmask, + result_type, + roll, + rollaxis, + shares_memory, + tensordot, + ufunc, + vdot, + vecdot, + where, + zeros, + zeros_like, +) +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + sctypeDict, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .records import recarray, record +from .shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + 
block, + hstack, + stack, + unstack, + vstack, +) +from .umath import ( + absolute, + absolute as abs, + add, + arccos, + arccos as acos, + arccosh, + arccosh as acosh, + arcsin, + arcsin as asin, + arcsinh, + arcsinh as asinh, + arctan, + arctan as atan, + arctan2, + arctan2 as atan2, + arctanh, + arctanh as atanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + invert as bitwise_invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + left_shift as bitwise_left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + power as pow, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + right_shift as bitwise_right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecmat, +) + +__all__ = [ + "False_", + "ScalarType", + "True_", + "abs", + "absolute", + "acos", + "acosh", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "asin", + "asinh", + "astype", + "atan", + "atan2", + "atanh", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_invert", + "bitwise_left_shift", + "bitwise_not", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "block", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concat", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "einsum", + "einsum_path", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "finfo", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "geomspace", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + 
"greater_equal", + "half", + "heaviside", + "hstack", + "hypot", + "identity", + "iinfo", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "linspace", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logspace", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "memmap", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", + "ones", + "ones_like", + "outer", + "partition", + "permute_dims", + "pi", + "positive", + "pow", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "recarray", + "reciprocal", + "record", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "sctypeDict", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "stack", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "unstack", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "vstack", + "where", + "zeros", + "zeros_like", +] diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 8f5de4b7bd89..b37014b6a648 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -9,6 +9,8 @@ """ +import textwrap + from numpy._core.function_base import add_newdoc from numpy._core.overrides import get_array_function_like_doc # noqa: F401 @@ -80,7 +82,6 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('coords', """ An N-dimensional tuple of current coordinates. @@ -99,7 +100,6 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('index', """ Current flat index into the array. @@ -118,17 +118,25 @@ """)) -# flatiter functions +# flatiter methods add_newdoc('numpy._core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator + """ + __array__($self, dtype=None, /, *, copy=None) + -- - """)) + flat.__array__([dtype], *, copy=None) + + Get array from iterator + """)) add_newdoc('numpy._core', 'flatiter', ('copy', """ - copy() + copy($self, /) + -- + + flat.copy() Get a copy of the iterator as a 1-D array. 
@@ -154,6 +162,19 @@

 add_newdoc('numpy._core', 'nditer',
     """
+    nditer(
+        op,
+        flags=None,
+        op_flags=None,
+        op_dtypes=None,
+        order='K',
+        casting='safe',
+        op_axes=None,
+        itershape=None,
+        buffersize=0,
+    )
+    --
+
     nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K',
         casting='safe', op_axes=None, itershape=None, buffersize=0)

@@ -165,63 +186,63 @@
     ----------
     op : ndarray or sequence of array_like
         The array(s) to iterate over.
     flags : sequence of str, optional
-        Flags to control the behavior of the iterator.
-
-        * ``buffered`` enables buffering when required.
-        * ``c_index`` causes a C-order index to be tracked.
-        * ``f_index`` causes a Fortran-order index to be tracked.
-        * ``multi_index`` causes a multi-index, or a tuple of indices
-          with one per iteration dimension, to be tracked.
-        * ``common_dtype`` causes all the operands to be converted to
-          a common data type, with copying or buffering as necessary.
-        * ``copy_if_overlap`` causes the iterator to determine if read
-          operands have overlap with write operands, and make temporary
-          copies as necessary to avoid overlap. False positives (needless
-          copying) are possible in some cases.
-        * ``delay_bufalloc`` delays allocation of the buffers until
-          a reset() call is made. Allows ``allocate`` operands to
-          be initialized before their values are copied into the buffers.
-        * ``external_loop`` causes the ``values`` given to be
-          one-dimensional arrays with multiple values instead of
-          zero-dimensional arrays.
-        * ``grow_inner`` allows the ``value`` array sizes to be made
-          larger than the buffer size when both ``buffered`` and
-          ``external_loop`` is used.
-        * ``ranged`` allows the iterator to be restricted to a sub-range
-          of the iterindex values.
-        * ``refs_ok`` enables iteration of reference types, such as
-          object arrays.
-        * ``reduce_ok`` enables iteration of ``readwrite`` operands
-          which are broadcasted, also known as reduction operands.
-        * ``zerosize_ok`` allows `itersize` to be zero.
+      Flags to control the behavior of the iterator.
+
+      * ``buffered`` enables buffering when required.
+      * ``c_index`` causes a C-order index to be tracked.
+      * ``f_index`` causes a Fortran-order index to be tracked.
+      * ``multi_index`` causes a multi-index, or a tuple of indices
+        with one per iteration dimension, to be tracked.
+      * ``common_dtype`` causes all the operands to be converted to
+        a common data type, with copying or buffering as necessary.
+      * ``copy_if_overlap`` causes the iterator to determine if read
+        operands have overlap with write operands, and make temporary
+        copies as necessary to avoid overlap. False positives (needless
+        copying) are possible in some cases.
+      * ``delay_bufalloc`` delays allocation of the buffers until
+        a reset() call is made. Allows ``allocate`` operands to
+        be initialized before their values are copied into the buffers.
+      * ``external_loop`` causes the ``values`` given to be
+        one-dimensional arrays with multiple values instead of
+        zero-dimensional arrays.
+      * ``grow_inner`` allows the ``value`` array sizes to be made
+        larger than the buffer size when both ``buffered`` and
+        ``external_loop`` are used.
+      * ``ranged`` allows the iterator to be restricted to a sub-range
+        of the iterindex values.
+      * ``refs_ok`` enables iteration of reference types, such as
+        object arrays.
+      * ``reduce_ok`` enables iteration of ``readwrite`` operands
+        which are broadcasted, also known as reduction operands.
+      * ``zerosize_ok`` allows `itersize` to be zero.
op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - ``readonly``, ``readwrite``, or ``writeonly`` must be specified. - - * ``readonly`` indicates the operand will only be read from. - * ``readwrite`` indicates the operand will be read from and written to. - * ``writeonly`` indicates the operand will only be written to. - * ``no_broadcast`` prevents the operand from being broadcasted. - * ``contig`` forces the operand data to be contiguous. - * ``aligned`` forces the operand data to be aligned. - * ``nbo`` forces the operand data to be in native byte order. - * ``copy`` allows a temporary read-only copy if required. - * ``updateifcopy`` allows a temporary read-write copy if required. - * ``allocate`` causes the array to be allocated if it is None - in the ``op`` parameter. - * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. - * ``arraymask`` indicates that this operand is the mask to use - for selecting elements when writing to operands with the - 'writemasked' flag set. The iterator does not enforce this, - but when writing from a buffer back to the array, it only - copies those elements indicated by this mask. - * ``writemasked`` indicates that only elements where the chosen - ``arraymask`` operand is True will be written to. - * ``overlap_assume_elementwise`` can be used to mark operands that are - accessed only in the iterator order, to allow less conservative - copying when ``copy_if_overlap`` is present. + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. op_dtypes : dtype or tuple of dtype(s), optional The required data type(s) of the operands. If copying or buffering is enabled, the data will be converted to/from their original types. @@ -398,11 +418,11 @@ original data when the :meth:`~object.__exit__` function is called but not before: - >>> a = np.arange(6, dtype='i4')[::-2] + >>> a = np.arange(6, dtype=np.int32)[::-2] >>> with np.nditer(a, [], ... [['writeonly', 'updateifcopy']], ... casting='unsafe', - ... op_dtypes=[np.dtype('f4')]) as i: + ... op_dtypes=[np.dtype(np.float32)]) as i: ... x = i.operands[0] ... 
x[:] = [-1, -2, -3] ... # a still unchanged here @@ -422,10 +442,22 @@ """) +# nditer attributes + +add_newdoc('numpy._core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + # nditer methods add_newdoc('numpy._core', 'nditer', ('copy', """ + copy($self, /) + -- + copy() Get a copy of the iterator in its current state. @@ -444,15 +476,11 @@ """)) -add_newdoc('numpy._core', 'nditer', ('operands', - """ - operands[`Slice`] - - The array(s) to be iterated over. Valid only before the iterator is closed. - """)) - add_newdoc('numpy._core', 'nditer', ('debug_print', """ + debug_print($self, /) + -- + debug_print() Print the current state of the `nditer` instance and debug info to stdout. @@ -461,6 +489,9 @@ add_newdoc('numpy._core', 'nditer', ('enable_external_loop', """ + enable_external_loop($self, /) + -- + enable_external_loop() When the "external_loop" was not used during construction, but @@ -471,6 +502,9 @@ add_newdoc('numpy._core', 'nditer', ('iternext', """ + iternext($self, /) + -- + iternext() Check whether iterations are left, and perform a single internal iteration @@ -486,6 +520,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_axis', """ + remove_axis($self, i, /) + -- + remove_axis(i, /) Removes axis `i` from the iterator. Requires that the flag "multi_index" @@ -495,6 +532,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_multi_index', """ + remove_multi_index($self, /) + -- + remove_multi_index() When the "multi_index" flag was specified, this removes it, allowing @@ -504,32 +544,62 @@ add_newdoc('numpy._core', 'nditer', ('reset', """ + reset($self, /) + -- + reset() Reset the iterator to its initial state. """)) +add_newdoc('numpy._core', 'nditer', ('close', + """ + close($self, /) + -- + + close() + + Resolve all writeback semantics in writeable operands. + + See Also + -------- + :ref:`nditer-context-manager` + + """)) + +# nested_iters + add_newdoc('numpy._core', 'nested_iters', """ - nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \ - order="K", casting="safe", buffersize=0) + nested_iters( + op, + axes, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + buffersize=0, + ) + -- + + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, + order='K', casting='safe', buffersize=0) Create nditers for use in nested loops Create a tuple of `nditer` objects which iterate in nested loops over different axes of the op argument. The first iterator is used in the - outermost loop, the last in the innermost loop. Advancing one will change - the subsequent iterators to point at its new element. + outermost loop, the last in the innermost loop. Advancing one will + change the subsequent iterators to point at its new element. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. - axes : list of list of int Each item is used as an "op_axes" argument to an nditer - flags, op_flags, op_dtypes, order, casting, buffersize (optional) See `nditer` parameters of the same name @@ -574,20 +644,6 @@ """) -add_newdoc('numpy._core', 'nditer', ('close', - """ - close() - - Resolve all writeback semantics in writeable operands. 
-
-    See Also
-    --------
-
-    :ref:`nditer-context-manager`
-
-    """))
-
-
 ###############################################################################
 #
 # broadcast
 #
@@ -596,6 +652,9 @@

 add_newdoc('numpy._core', 'broadcast',
     """
+    broadcast(*arrays)
+    --
+
     Produce an object that mimics broadcasting.

     Parameters
@@ -765,8 +824,13 @@
     """))


+# methods
+
 add_newdoc('numpy._core', 'broadcast', ('reset',
     """
+    reset($self, /)
+    --
+
     reset()

     Reset the broadcasted result's iterator(s).

@@ -805,8 +869,21 @@

 add_newdoc('numpy._core.multiarray', 'array',
     """
+    array(
+        object,
+        dtype=None,
+        *,
+        copy=True,
+        order='K',
+        subok=False,
+        ndmin=0,
+        ndmax=0,
+        like=None,
+    )
+    --
+
     array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
-          like=None)
+          ndmax=0, like=None)

     Create an array.

@@ -855,6 +932,16 @@
         Specifies the minimum number of dimensions that the resulting
         array should have. Ones will be prepended to the shape as
         needed to meet this requirement.
+    ndmax : int, optional
+        Specifies the maximum number of dimensions to create when inferring
+        shape from nested sequences. By default (ndmax=0), NumPy recurses
+        through all nesting levels (up to the compile-time constant
+        ``NPY_MAXDIMS``).
+        Setting ``ndmax`` stops recursion at the specified depth, preserving
+        deeper nested structures as objects instead of promoting them to
+        higher-dimensional arrays; ``dtype=np.object_`` is then required.
+
+        .. versionadded:: 2.4.0
     ${ARRAY_FUNCTION_LIKE}

         .. versionadded:: 1.20.0
@@ -874,7 +961,7 @@
     ones : Return a new array setting values to one.
     zeros : Return a new array setting values to zero.
     full : Return a new array of given shape filled with value.
-    copy: Return an array copy of the given object.
+    copy : Return an array copy of the given object.


     Notes
@@ -907,7 +994,7 @@

     Type provided:

-    >>> np.array([1, 2, 3], dtype=complex)
+    >>> np.array([1, 2, 3], dtype=np.complex128)
     array([ 1.+0.j,  2.+0.j,  3.+0.j])

     Data-type consisting of more than one element:

@@ -926,10 +1013,28 @@
     matrix([[1, 2],
             [3, 4]])

+    Limiting the maximum dimensions with ``ndmax``:
+
+    >>> a = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=2)
+    >>> a
+    array([[1, 2],
+           [3, 4]], dtype=object)
+    >>> a.shape
+    (2, 2)
+
+    >>> b = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=1)
+    >>> b
+    array([list([1, 2]), list([3, 4])], dtype=object)
+    >>> b.shape
+    (2,)
+
     """)

 add_newdoc('numpy._core.multiarray', 'asarray',
     """
+    asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None)
+    --
+
     asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None)

     Convert the input to an array.

@@ -943,12 +1048,13 @@
     dtype : data-type, optional
         By default, the data-type is inferred from the input data.
     order : {'C', 'F', 'A', 'K'}, optional
-        Memory layout.  'A' and 'K' depend on the order of input array a.
-        'C' row-major (C-style),
-        'F' column-major (Fortran-style) memory representation.
-        'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
-        'K' (keep) preserve input order
-        Defaults to 'K'.
+        The memory layout of the output.
+        'C' gives a row-major layout (C-style),
+        'F' gives a column-major layout (Fortran-style).
+        'C' and 'F' will copy if needed to ensure the output format.
+        'A' (any) is equivalent to 'F' if the input ``a`` is non-contiguous
+        or Fortran-contiguous; otherwise, it is equivalent to 'C'.
+        Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous.
+        'K' (keep) is the default and preserves the input order for the output.
     device : str, optional
         The device on which to place the created array. Default: ``None``.
         For Array-API interoperability only, so must be ``"cpu"`` if passed.
@@ -978,12 +1084,10 @@
     --------
     asanyarray : Similar function which passes through subclasses.
     ascontiguousarray : Convert input to a contiguous array.
-    asfortranarray : Convert input to an ndarray with column-major
-                     memory order.
+    asfortranarray : Convert input to an ndarray with column-major memory order.
     asarray_chkfinite : Similar function which checks input for NaNs and Infs.
     fromiter : Create an array from an iterator.
-    fromfunction : Construct an array by executing a function on grid
-                   positions.
+    fromfunction : Construct an array by executing a function on grid positions.

     Examples
     --------
@@ -1022,6 +1126,9 @@

 add_newdoc('numpy._core.multiarray', 'asanyarray',
     """
+    asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None)
+    --
+
     asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None)

     Convert the input to an ndarray, but pass ndarray subclasses through.

@@ -1035,12 +1142,14 @@
     dtype : data-type, optional
         By default, the data-type is inferred from the input data.
     order : {'C', 'F', 'A', 'K'}, optional
-        Memory layout.  'A' and 'K' depend on the order of input array a.
-        'C' row-major (C-style),
-        'F' column-major (Fortran-style) memory representation.
-        'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
-        'K' (keep) preserve input order
-        Defaults to 'C'.
+        The memory layout of the output.
+        'C' gives a row-major layout (C-style),
+        'F' gives a column-major layout (Fortran-style).
+        'C' and 'F' will copy if needed to ensure the output format.
+        'A' (any) is equivalent to 'F' if the input ``a`` is non-contiguous
+        or Fortran-contiguous; otherwise, it is equivalent to 'C'.
+        Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous.
+        'K' (keep) preserves the input order for the output.
+        'C' is the default.
     device : str, optional
         The device on which to place the created array. Default: ``None``.
         For Array-API interoperability only, so must be ``"cpu"`` if passed.
@@ -1071,13 +1180,10 @@
     --------
     asarray : Similar function which always returns ndarrays.
     ascontiguousarray : Convert input to a contiguous array.
-    asfortranarray : Convert input to an ndarray with column-major
-                     memory order.
-    asarray_chkfinite : Similar function which checks input for NaNs and
-                        Infs.
+    asfortranarray : Convert input to an ndarray with column-major memory order.
+    asarray_chkfinite : Similar function which checks input for NaNs and Infs.
     fromiter : Create an array from an iterator.
-    fromfunction : Construct an array by executing a function on grid
-                   positions.
+    fromfunction : Construct an array by executing a function on grid positions.

     Examples
     --------
@@ -1098,6 +1204,9 @@

 add_newdoc('numpy._core.multiarray', 'ascontiguousarray',
     """
+    ascontiguousarray(a, dtype=None, *, like=None)
+    --
+
     ascontiguousarray(a, dtype=None, *, like=None)

     Return a contiguous array (ndim >= 1) in memory (C order).

@@ -1120,8 +1229,7 @@

     See Also
     --------
-    asfortranarray : Convert input to an ndarray with column-major
-                     memory order.
+    asfortranarray : Convert input to an ndarray with column-major memory order.
     require : Return an ndarray that satisfies requirements.
     ndarray.flags : Information about the memory layout of the array.
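Since the rewritten `order` descriptions above are easy to misread, here is a small illustration of the documented 'C', 'A', and 'K' behavior for a Fortran-contiguous input. The specific flag checks are an assumption about how the documented semantics play out, not part of the patch itself:

import numpy as np

a = np.asfortranarray(np.ones((3, 4)))                 # Fortran-contiguous input
print(np.asarray(a, order='C').flags['C_CONTIGUOUS'])  # True: 'C' copies to row-major
print(np.asarray(a, order='A').flags['F_CONTIGUOUS'])  # True: 'A' behaves like 'F' here
print(np.asarray(a, order='K') is a)                   # True: 'K' returns the input unchanged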
@@ -1161,6 +1269,9 @@ add_newdoc('numpy._core.multiarray', 'asfortranarray', """ + asfortranarray(a, dtype=None, *, like=None) + -- + asfortranarray(a, dtype=None, *, like=None) Return an array (ndim >= 1) laid out in Fortran order in memory. @@ -1224,7 +1335,10 @@ add_newdoc('numpy._core.multiarray', 'empty', """ - empty(shape, dtype=float, order='C', *, device=None, like=None) + empty(shape, dtype=None, order='C', *, device=None, like=None) + -- + + empty(shape, dtype=None, order='C', *, device=None, like=None) Return a new array of given shape and type, without initializing entries. @@ -1237,8 +1351,7 @@ `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. + (C-style) or column-major (Fortran-style) order in memory. device : str, optional The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -1276,7 +1389,7 @@ array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized - >>> np.empty([2, 2], dtype=int) + >>> np.empty([2, 2], dtype=np.int_) array([[-1073741821, -1067949133], [ 496041986, 19249760]]) #uninitialized @@ -1294,11 +1407,14 @@ string. If `obj` is not given, it will be interpreted as None for object type and as zeros for all other types. - """) + """) # sufficient null bytes for all number dtypes add_newdoc('numpy._core.multiarray', 'zeros', """ - zeros(shape, dtype=float, order='C', *, like=None) + zeros(shape, dtype=None, order='C', *, device=None, like=None) + -- + + zeros(shape, dtype=None, order='C', *, device=None, like=None) Return a new array of given shape and type, filled with zeros. @@ -1311,8 +1427,12 @@ `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. + (C-style) or column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -1335,7 +1455,7 @@ >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) - >>> np.zeros((5,), dtype=int) + >>> np.zeros((5,), dtype=np.int_) array([0, 0, 0, 0, 0]) >>> np.zeros((2, 1)) @@ -1354,7 +1474,8 @@ """) add_newdoc('numpy._core.multiarray', 'set_typeDict', - """set_typeDict(dict) + """ + set_typeDict(dict) Set the internal dictionary that can look up an array type using a registered code. @@ -1363,7 +1484,10 @@ add_newdoc('numpy._core.multiarray', 'fromstring', """ - fromstring(string, dtype=float, count=-1, *, sep, like=None) + fromstring(string, dtype=None, count=-1, *, sep, like=None) + -- + + fromstring(string, dtype=np.float64, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. @@ -1372,7 +1496,7 @@ string : str A string containing the data. dtype : data-type, optional - The data type of the array; default: float. For binary input data, + The data type of the array; default: `numpy.float64`. For binary input data, the data must be in exactly this format. Most builtin numeric types are supported and extension types may be supported. 
count : int, optional @@ -1414,15 +1538,18 @@ Examples -------- >>> import numpy as np - >>> np.fromstring('1 2', dtype=int, sep=' ') + >>> np.fromstring('1 2', dtype=np.int_, sep=' ') array([1, 2]) - >>> np.fromstring('1, 2', dtype=int, sep=',') + >>> np.fromstring('1, 2', dtype=np.int_, sep=',') array([1, 2]) """) add_newdoc('numpy._core.multiarray', 'compare_chararrays', """ + compare_chararrays(a1, a2, cmp, rstrip) + -- + compare_chararrays(a1, a2, cmp, rstrip) Performs element-wise comparison of two string arrays using the @@ -1434,20 +1561,20 @@ Arrays to be compared. cmp : {"<", "<=", "==", ">=", ">", "!="} Type of comparison. - rstrip : Boolean - If True, the spaces at the end of Strings are removed before the comparison. + rstrip : bool + If True, the spaces at the end of strings are removed before the comparison. Returns ------- out : ndarray - The output array of type Boolean with the same shape as a and b. + The output array of type `numpy.bool` with the same shape as `a1` and `a2`. Raises ------ ValueError If `cmp` is not valid. TypeError - If at least one of `a` or `b` is a non-string array + If at least one of `a1` or `a2` is a non-string array Examples -------- @@ -1461,6 +1588,9 @@ add_newdoc('numpy._core.multiarray', 'fromiter', """ + fromiter(iter, dtype, count=-1, *, like=None) + -- + fromiter(iter, dtype, count=-1, *, like=None) Create a new 1-dimensional array from an iterable object. @@ -1516,7 +1646,10 @@ add_newdoc('numpy._core.multiarray', 'fromfile', """ - fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) + fromfile(file, dtype=None, count=-1, sep='', offset=0, *, like=None) + -- + + fromfile(file, dtype=np.float64, count=-1, sep='', offset=0, *, like=None) Construct an array from data in a text or binary file. @@ -1527,7 +1660,9 @@ Parameters ---------- file : file or str or Path - Open file object or filename. + An open file object, a string containing the filename, or a Path object. + When reading from a file object it must support random access + (i.e. it must have tell and seek methods). dtype : data-type Data type of the returned array. For binary files, it is used to determine the size and byte-order @@ -1599,7 +1734,10 @@ add_newdoc('numpy._core.multiarray', 'frombuffer', """ - frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None) + frombuffer(buffer, dtype=None, count=-1, offset=0, *, like=None) + -- + + frombuffer(buffer, dtype=np.float64, count=-1, offset=0, *, like=None) Interpret a buffer as a 1-dimensional array. @@ -1608,7 +1746,7 @@ buffer : buffer_like An object that exposes the buffer interface. dtype : data-type, optional - Data-type of the returned array; default: float. + Data-type of the returned array. Default is `numpy.float64`. count : int, optional Number of items to read. ``-1`` means all data in the buffer. 
offset : int, optional @@ -1632,7 +1770,7 @@ If the buffer has data that is not in machine byte-order, this should be specified as part of the data-type, e.g.:: - >>> dt = np.dtype(int) + >>> dt = np.dtype(np.int_) >>> dt = dt.newbyteorder('>') >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP @@ -1659,6 +1797,9 @@ add_newdoc('numpy._core.multiarray', 'from_dlpack', """ + from_dlpack(x, /, *, device=None, copy=None) + -- + from_dlpack(x, /, *, device=None, copy=None) Create a NumPy array from an object implementing the ``__dlpack__`` @@ -1709,6 +1850,9 @@ add_newdoc('numpy._core.multiarray', 'arange', """ + arange(start_or_stop, /, stop=None, step=1, *, dtype=None, device=None, like=None) + -- + arange([start,] stop[, step,], dtype=None, *, device=None, like=None) Return evenly spaced values within a given interval. @@ -1781,9 +1925,9 @@ `start` is much larger than `step`. This can lead to unexpected behaviour. For example:: - >>> np.arange(0, 5, 0.5, dtype=int) + >>> np.arange(0, 5, 0.5, dtype=np.int_) array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) - >>> np.arange(-3, 3, 0.5, dtype=int) + >>> np.arange(-3, 3, 0.5, dtype=np.int_) array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) In such cases, the use of `numpy.linspace` should be preferred. @@ -1833,13 +1977,16 @@ add_newdoc('numpy._core.multiarray', '_reconstruct', """_reconstruct(subtype, shape, dtype) - Construct an empty array. Used by Pickles. + Construct an empty array. Used by Pickle. """) add_newdoc('numpy._core.multiarray', 'promote_types', """ - promote_types(type1, type2) + promote_types(type1, type2, /) + -- + + promote_types(type1, type2, /) Returns the data type with the smallest size and smallest scalar kind to which both ``type1`` and ``type2`` may be safely cast. @@ -1883,16 +2030,16 @@ Examples -------- >>> import numpy as np - >>> np.promote_types('f4', 'f8') + >>> np.promote_types(np.float32, np.float64) dtype('float64') - >>> np.promote_types('i8', 'f4') + >>> np.promote_types(np.int64, np.float32) dtype('float64') >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') + >>> np.promote_types(np.int32, 'S8') dtype('S11') An example of a non-associative case: @@ -2226,8 +2373,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', """ - ndarray(shape, dtype=float, buffer=None, offset=0, - strides=None, order=None) + ndarray(shape, dtype=None, buffer=None, offset=0, strides=None, order=None) + -- + + ndarray(shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the @@ -2250,6 +2399,7 @@ Shape of created array. dtype : data-type, optional Any object that can be interpreted as a numpy data type. + Default is `numpy.float64`. buffer : object exposing buffer interface, optional Used to fill the array with data. offset : int, optional @@ -2335,7 +2485,7 @@ First mode, `buffer` is None: >>> import numpy as np - >>> np.ndarray(shape=(2,2), dtype=float, order='F') + >>> np.ndarray(shape=(2,2), dtype=np.float64, order='F') array([[0.0e+000, 0.0e+000], # random [ nan, 2.5e-323]]) @@ -2343,7 +2493,7 @@ >>> np.ndarray((2,), buffer=np.array([1,2,3]), ... offset=np.int_().itemsize, - ... dtype=int) # offset = 1*itemsize, i.e. skip first element + ... dtype=np.int_) # offset = 1*itemsize, i.e. 
skip first element
     array([2, 3])

     """)

@@ -2367,21 +2517,6 @@
 add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_struct__',
     """Array protocol: C-struct side."""))

-add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__',
-    """
-    a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None)
-
-    DLPack Protocol: Part of the Array API.
-
-    """))
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__',
-    """
-    a.__dlpack_device__()
-
-    DLPack Protocol: Part of the Array API.
-
-    """))

 add_newdoc('numpy._core.multiarray', 'ndarray', ('base',
     """
@@ -2953,6 +3088,7 @@
            [5, 7]]])

     """))

+
 ##############################################################################
 #
 # ndarray methods
@@ -2962,6 +3098,9 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__',
     """
+    __array__($self, dtype=None, /, *, copy=None)
+    --
+
     a.__array__([dtype], *, copy=None)

     For ``dtype`` parameter it returns a new reference to self if
@@ -2981,6 +3120,9 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_finalize__',
     """
+    __array_finalize__($self, obj, /)
+    --
+
     a.__array_finalize__(obj, /)

     Present so subclasses can call super. Does nothing.

     """))

@@ -2988,29 +3130,48 @@

-add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__',
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_function__',
     """
-    a.__array_wrap__(array[, context], /)
+    __array_function__($self, /, func, types, args, kwargs)
+    --

-    Returns a view of `array` with the same type as self.
+    a.__array_function__(func, types, args, kwargs)
+
+    See NEP 18 and NEP 35 for details.

     """))


-add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__',
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_ufunc__',
     """
-    a.__copy__()
+    __array_ufunc__($self, ufunc, method, /, *inputs, **kwargs)
+    --

-    Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
+    a.__array_ufunc__(ufunc, method, /, *inputs, **kwargs)

-    Equivalent to ``a.copy(order='K')``.
+    See NEP 13 for details.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__',
+    """
+    __array_wrap__($self, array, context=None, return_scalar=True, /)
+    --
+
+    a.__array_wrap__(array[, context[, return_scalar]], /)
+
+    Returns a view of `array` with the same type as self.

     """))


 add_newdoc('numpy._core.multiarray', 'ndarray', ('__class_getitem__',
     """
-    a.__class_getitem__(item, /)
+    __class_getitem__($cls, item, /)
+    --
+
+    ndarray[shape, dtype]

     Return a parametrized wrapper around the `~numpy.ndarray` type.

@@ -3023,11 +3184,10 @@

     Examples
     --------
-    >>> from typing import Any
     >>> import numpy as np

-    >>> np.ndarray[Any, np.dtype[np.uint8]]
-    numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]]
+    >>> np.ndarray[tuple[int], np.dtype[np.uint8]]
+    numpy.ndarray[tuple[int], numpy.dtype[numpy.uint8]]

     See Also
     --------
@@ -3038,17 +3198,36 @@

     """))


-add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__',
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__',
     """
-    a.__deepcopy__(memo, /)
+    __dlpack__($self, /, *, stream=None, max_version=None, dl_device=None, copy=None)
+    --

-    Used if :func:`copy.deepcopy` is called on an array.
+    a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None)
+
+    Exports the array for consumption by ``from_dlpack()`` as a DLPack capsule.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__',
+    """
+    __dlpack_device__($self, /)
+    --
+
+    a.__dlpack_device__()
+
+    Returns device type (``1``) and device ID (``0``) in DLPack format.
+    Meant for use within ``from_dlpack()``.

     """))


 add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__',
     """
+    __reduce__($self, /)
+    --
+
     a.__reduce__()

     For pickling.

@@ -3056,8 +3235,23 @@
     """))


+add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce_ex__',
+    """
+    __reduce_ex__($self, protocol, /)
+    --
+
+    a.__reduce_ex__(protocol, /)
+
+    For pickling.
+
+    """))
+
+
 add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__',
     """
+    __setstate__($self, state, /)
+    --
+
     a.__setstate__(state, /)

     For unpickling.

@@ -3078,97 +3272,242 @@
     """))


-add_newdoc('numpy._core.multiarray', 'ndarray', ('all',
+add_newdoc('numpy._core.multiarray', 'ndarray', ('dot',
     """
-    a.all(axis=None, out=None, keepdims=False, *, where=True)
+    dot($self, other, /, out=None)
+    --

-    Returns True if all elements evaluate to True.
+    a.dot(other, /, out=None)

-    Refer to `numpy.all` for full documentation.
+    Refer to :func:`numpy.dot` for full documentation.

     See Also
     --------
-    numpy.all : equivalent function
+    numpy.dot : equivalent function

     """))


-add_newdoc('numpy._core.multiarray', 'ndarray', ('any',
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition',
     """
-    a.any(axis=None, out=None, keepdims=False, *, where=True)
+    argpartition($self, kth, /, axis=-1, kind='introselect', order=None)
+    --

-    Returns True if any of the elements of `a` evaluate to True.
+    a.argpartition(kth, axis=-1, kind='introselect', order=None)

-    Refer to `numpy.any` for full documentation.
+    Returns the indices that would partition this array.
+
+    Refer to `numpy.argpartition` for full documentation.

     See Also
     --------
-    numpy.any : equivalent function
+    numpy.argpartition : equivalent function

     """))


-add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax',
+add_newdoc('numpy._core.multiarray', 'ndarray', ('partition',
     """
-    a.argmax(axis=None, out=None, *, keepdims=False)
+    partition($self, kth, /, axis=-1, kind='introselect', order=None)
+    --

-    Return indices of the maximum values along the given axis.
+    a.partition(kth, axis=-1, kind='introselect', order=None)

-    Refer to `numpy.argmax` for full documentation.
+    Partially sorts the elements in the array in such a way that the value of
+    the element in k-th position is in the position it would be in a sorted
+    array. In the output array, all elements smaller than the k-th element
+    are located to the left of this element and all equal or greater are
+    located to its right. The ordering of the elements in the two partitions
+    on either side of the k-th element in the output array is undefined.
+
+    Parameters
+    ----------
+    kth : int or sequence of ints
+        Element index to partition by. The kth element value will be in its
+        final sorted position, all smaller elements will be moved before it,
+        and all equal or greater elements behind it.
+        The order of all elements in the partitions is undefined.
+        If provided with a sequence of kth values, it will partition all
+        elements indexed by those values into their sorted positions at once.
+
+        .. deprecated:: 1.22.0
+            Passing booleans as index is deprecated.
+    axis : int, optional
+        Axis along which to sort. Default is -1, which means sort along the
+        last axis.
+    kind : {'introselect'}, optional
+        Selection algorithm. Default is 'introselect'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need to be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.

     See Also
     --------
-    numpy.argmax : equivalent function
+    numpy.partition : Return a partitioned copy of an array.
+    argpartition : Indirect partition.
+    sort : Full sort.

-    """))
+    Notes
+    -----
+    See ``np.partition`` for notes on the different algorithms.

+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([3, 4, 2, 1])
+    >>> a.partition(3)
+    >>> a
+    array([2, 1, 3, 4]) # may vary

-add_newdoc('numpy._core.multiarray', 'ndarray', ('argmin',
-    """
-    a.argmin(axis=None, out=None, *, keepdims=False)
+    >>> a.partition((1, 3))
+    >>> a
+    array([1, 2, 3, 4])

-    Return indices of the minimum values along the given axis.
+    """))

-    Refer to `numpy.argmin` for detailed documentation.

-    See Also
-    --------
-    numpy.argmin : equivalent function
+##############################################################################
+#
+# methods from both `ndarray` and `generic`
+#
+##############################################################################

-    """))
+_METHOD_DOC_TEMPLATE = """{name}({params})
+--
+{doc}"""


-add_newdoc('numpy._core.multiarray', 'ndarray', ('argsort',
+def _array_method_doc(name: str, params: str, doc: str) -> None:
     """
-    a.argsort(axis=-1, kind=None, order=None)
+    Internal helper function for adding docstrings to a method shared by
+    `numpy.ndarray` and `numpy.generic`.

-    Returns the indices that would sort this array.
+    The provided docstring will be added to the given `numpy.ndarray` method.
+    For the `numpy.generic` method, a shorter docstring indicating that it is
+    identical to the `ndarray` method will be created.
+    Both methods will have a proper and identical `__text_signature__`.

-    Refer to `numpy.argsort` for full documentation.
+    Parameters
+    ----------
+    name : str
+        Name of the method.
+    params : str
+        Parameter signature for the method without parentheses, for example,
+        ``"a, /, dtype=None, *, copy=False"``.
+        Parameter defaults must be understood by `ast.literal_eval`, i.e. strings,
+        bytes, numbers, tuples, lists, dicts, sets, booleans, or None.
+    doc : str
+        The full docstring for the `ndarray` method.

-    See Also
-    --------
-    numpy.argsort : equivalent function
+    """

-    """))
+    # prepend the pos-only `$self` parameter to the method signature
+    if "/" not in params:
+        params = f"/, {params}" if params else "/"
+    params = f"$self, {params}"


-add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition',
-    """
-    a.argpartition(kth, axis=-1, kind='introselect', order=None)
+    # add docstring to `np.ndarray.{name}`
+    doc = textwrap.dedent(doc).strip()
+    doc_array = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc)
+    add_newdoc("numpy._core.multiarray", "ndarray", (name, doc_array))

-    Returns the indices that would partition this array.
+    # add docstring to `np.generic.{name}`
+    doc_scalar = f"Scalar method identical to `ndarray.{name}`."
+    doc_scalar = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc_scalar)
+    add_newdoc("numpy._core.numerictypes", "generic", (name, doc_scalar))

-    Refer to `numpy.argpartition` for full documentation.

-    See Also
-    --------
-    numpy.argpartition : equivalent function
+_array_method_doc('__array_namespace__', "*, api_version=None",
+    """
+    a.__array_namespace__(*, api_version=None)

+    For Array API compatibility.
+    """)

+_array_method_doc('__copy__', "",
+    """
+    a.__copy__()
+
+    Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
+
+    Equivalent to ``a.copy(order='K')``.
+    """)

+_array_method_doc('__deepcopy__', "memo, /",
+    """
+    a.__deepcopy__(memo, /)
+
+    Used if :func:`copy.deepcopy` is called on an array.
+    """)

+_array_method_doc('all', "axis=None, out=None, keepdims=False, *, where=True",
+    """
+    a.all(axis=None, out=None, *, keepdims=<no value>, where=<no value>)
+
+    Returns True if all elements evaluate to True.
+
+    Refer to `numpy.all` for full documentation.
+
+    See Also
+    --------
+    numpy.all : equivalent function
+    """)

+_array_method_doc('any', "axis=None, out=None, keepdims=False, *, where=True",
+    """
+    a.any(axis=None, out=None, *, keepdims=<no value>, where=<no value>)
+
+    Returns True if any of the elements of `a` evaluate to True.
+
+    Refer to `numpy.any` for full documentation.
+
+    See Also
+    --------
+    numpy.any : equivalent function
+    """)

+_array_method_doc('argmax', "axis=None, out=None, *, keepdims=False",
+    """
+    a.argmax(axis=None, out=None, *, keepdims=False)
+
+    Return indices of the maximum values along the given axis.
+
+    Refer to `numpy.argmax` for full documentation.
+
+    See Also
+    --------
+    numpy.argmax : equivalent function
+    """)

+_array_method_doc('argmin', "axis=None, out=None, *, keepdims=False",
+    """
+    a.argmin(axis=None, out=None, *, keepdims=False)
+
+    Return indices of the minimum values along the given axis.
+
+    Refer to `numpy.argmin` for detailed documentation.
+
+    See Also
+    --------
+    numpy.argmin : equivalent function
+    """)

+_array_method_doc('argsort', "axis=-1, kind=None, order=None, *, stable=None",
+    """
+    a.argsort(axis=-1, kind=None, order=None, *, stable=None)
+
+    Returns the indices that would sort this array.
+
+    Refer to `numpy.argsort` for full documentation.
+
+    See Also
+    --------
+    numpy.argsort : equivalent function
+    """)

-add_newdoc('numpy._core.multiarray', 'ndarray', ('astype',
+_array_method_doc('astype', "dtype, order='K', casting='unsafe', subok=True, copy=True",
     """
     a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)

@@ -3185,7 +3524,7 @@
         'C' order otherwise, and 'K' means as close to the
         order the array elements appear in memory as possible.
         Default is 'K'.
-    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'same_value', 'unsafe'}, optional
         Controls what kind of data casting may occur. Defaults to 'unsafe'
         for backwards compatibility.

@@ -3195,6 +3534,12 @@
         * 'same_kind' means only safe casts or casts within a kind,
          like float64 to float32, are allowed.
        * 'unsafe' means any data conversions may be done.
+        * 'same_value' means any data conversions may be done, but the values
+          must not change, including rounding of floats or overflow of ints.
+
+        .. versionadded:: 2.4
+            Support for ``'same_value'`` was added.
+
     subok : bool, optional
         If True, then sub-classes will be passed-through (default), otherwise
         the returned array will be forced to be a base-class array.
@@ -3217,6 +3562,9 @@
     ComplexWarning
         When casting from complex to float or int. To avoid this, one
         should use ``a.real.astype(t)``.
+    ValueError
+        When casting using ``'same_value'`` and the values change or would
+        overflow.

     Examples
     --------
@@ -3225,13 +3573,19 @@
     >>> x
     array([1.  , 2.  , 2.5])

-add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap',
+_array_method_doc('byteswap', "inplace=False",
     """
     a.byteswap(inplace=False)

@@ -3282,11 +3636,9 @@
     >>> A.view(np.uint8)
     array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
            0, 3], dtype=uint8)
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('choose',
+_array_method_doc('choose', "choices, out=None, mode='raise'",
     """
     a.choose(choices, out=None, mode='raise')

@@ -3297,13 +3649,11 @@
     See Also
     --------
     numpy.choose : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('clip',
+_array_method_doc('clip', "min=None, max=None, out=None, **kwargs",
     """
-    a.clip(min=None, max=None, out=None, **kwargs)
+    a.clip(min=<no value>, max=<no value>, out=None, **kwargs)

     Return an array whose values are limited to ``[min, max]``.
     One of max or min must be given.

@@ -3313,11 +3663,9 @@
     See Also
     --------
     numpy.clip : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('compress',
+_array_method_doc('compress', "condition, axis=None, out=None",
     """
     a.compress(condition, axis=None, out=None)

@@ -3328,11 +3676,9 @@
     See Also
     --------
     numpy.compress : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('conj',
+_array_method_doc('conj', "",
     """
     a.conj()

@@ -3343,11 +3689,9 @@
     See Also
     --------
     numpy.conjugate : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate',
+_array_method_doc('conjugate', "",
     """
     a.conjugate()

@@ -3358,11 +3702,9 @@
     See Also
     --------
     numpy.conjugate : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('copy',
+_array_method_doc('copy', "order='C'",
     """
     a.copy(order='C')

@@ -3409,12 +3751,12 @@
     >>> y.flags['C_CONTIGUOUS']
     True

-    For arrays containing Python objects (e.g. dtype=object),
+    For arrays containing Python objects (e.g. dtype=np.object_),
     the copy is a shallow one.
The new array will contain the same object which may lead to surprises if that object can be modified (is mutable): - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> b = a.copy() >>> b[2][0] = 10 >>> a @@ -3424,18 +3766,16 @@ use `copy.deepcopy`: >>> import copy - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> c = copy.deepcopy(a) >>> c[2][0] = 10 >>> c array([1, 'm', list([10, 3, 4])], dtype=object) >>> a array([1, 'm', list([2, 3, 4])], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod', +_array_method_doc('cumprod', "axis=None, dtype=None, out=None", """ a.cumprod(axis=None, dtype=None, out=None) @@ -3446,11 +3786,9 @@ See Also -------- numpy.cumprod : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum', +_array_method_doc('cumsum', "axis=None, dtype=None, out=None", """ a.cumsum(axis=None, dtype=None, out=None) @@ -3461,11 +3799,9 @@ See Also -------- numpy.cumsum : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal', +_array_method_doc('diagonal', "offset=0, axis1=0, axis2=1", """ a.diagonal(offset=0, axis1=0, axis2=1) @@ -3478,14 +3814,9 @@ See Also -------- numpy.diagonal : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dot')) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dump', +_array_method_doc('dump', "file", """ a.dump(file) @@ -3496,25 +3827,21 @@ ---------- file : str or Path A string naming the dump file. + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps', +_array_method_doc('dumps', "", """ a.dumps() Returns the pickle of the array as a string. - pickle.loads will convert the string back to an array. + ``pickle.loads`` will convert the string back to an array. Parameters ---------- None + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('fill', +_array_method_doc('fill', "value", """ a.fill(value) @@ -3541,7 +3868,7 @@ to a single array element. The following is a rare example where this distinction is important: - >>> a = np.array([None, None], dtype=object) + >>> a = np.array([None, None], dtype=np.object_) >>> a[0] = np.array(3) >>> a array([array(3), None], dtype=object) @@ -3554,11 +3881,9 @@ >>> a[...] = np.array(3) >>> a array([3, 3], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten', +_array_method_doc('flatten', "order='C'", """ a.flatten(order='C') @@ -3593,11 +3918,9 @@ array([1, 2, 3, 4]) >>> a.flatten('F') array([1, 3, 2, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield', +_array_method_doc('getfield', "dtype, offset=0", """ a.getfield(dtype, offset=0) @@ -3636,11 +3959,9 @@ >>> x.getfield(np.float64, offset=8) array([[1., 0.], [0., 4.]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('item', +_array_method_doc('item', "*args", """ a.item(*args) @@ -3699,16 +4020,16 @@ For an array with object dtype, elements are returned as-is. 
-    >>> a = np.array([np.int64(1)], dtype=object)
+    >>> a = np.array([np.int64(1)], dtype=np.object_)
     >>> a.item() #return np.int64
     np.int64(1)
+    """)

-    """))
-
+_KWARGS_REDUCE = "keepdims=<no value>, initial=<no value>, where=<no value>"

-add_newdoc('numpy._core.multiarray', 'ndarray', ('max',
-    """
-    a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+_array_method_doc('max', "axis=None, out=None, **kwargs",
+    f"""
+    a.max(axis=None, out=None, *, {_KWARGS_REDUCE})

     Return the maximum along a given axis.

@@ -3717,89 +4038,89 @@
     See Also
     --------
     numpy.amax : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('mean',
-    """
-    a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
+_array_method_doc('min', "axis=None, out=None, **kwargs",
+    f"""
+    a.min(axis=None, out=None, *, {_KWARGS_REDUCE})

-    Returns the average of the array elements along given axis.
+    Return the minimum along a given axis.

-    Refer to `numpy.mean` for full documentation.
+    Refer to `numpy.amin` for full documentation.

     See Also
     --------
-    numpy.mean : equivalent function
-
-    """))
-
+    numpy.amin : equivalent function
+    """)

-add_newdoc('numpy._core.multiarray', 'ndarray', ('min',
-    """
-    a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+_array_method_doc('prod', "axis=None, dtype=None, out=None, **kwargs",
+    f"""
+    a.prod(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE})

-    Return the minimum along a given axis.
+    Return the product of the array elements over the given axis

-    Refer to `numpy.amin` for full documentation.
+    Refer to `numpy.prod` for full documentation.

     See Also
     --------
-    numpy.amin : equivalent function
+    numpy.prod : equivalent function
+    """)

-    """))
+_array_method_doc('sum', "axis=None, dtype=None, out=None, **kwargs",
+    f"""
+    a.sum(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE})
+
+    Return the sum of the array elements over the given axis.
+
+    Refer to `numpy.sum` for full documentation.

-add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero',
+    See Also
+    --------
+    numpy.sum : equivalent function
+    """)
+
+_array_method_doc('mean', "axis=None, dtype=None, out=None, **kwargs",
     """
-    a.nonzero()
+    a.mean(axis=None, dtype=None, out=None, *, keepdims=<no value>, where=<no value>)

-    Return the indices of the elements that are non-zero.
+    Returns the average of the array elements along given axis.

-    Refer to `numpy.nonzero` for full documentation.
+    Refer to `numpy.mean` for full documentation.

     See Also
     --------
-    numpy.nonzero : equivalent function
-
-    """))
-
+    numpy.mean : equivalent function
+    """)

-add_newdoc('numpy._core.multiarray', 'ndarray', ('prod',
+_array_method_doc('nonzero', "",
     """
-    a.prod(axis=None, dtype=None, out=None, keepdims=False,
-        initial=1, where=True)
+    a.nonzero()

-    Return the product of the array elements over the given axis
+    Return the indices of the elements that are non-zero.

-    Refer to `numpy.prod` for full documentation.
+    Refer to `numpy.nonzero` for full documentation.

     See Also
     --------
-    numpy.prod : equivalent function
-
-    """))
-
+    numpy.nonzero : equivalent function
+    """)

-add_newdoc('numpy._core.multiarray', 'ndarray', ('put',
+_array_method_doc('put', "indices, values, /, mode='raise'",
     """
     a.put(indices, values, mode='raise')

-    Set ``a.flat[n] = values[n]`` for all `n` in indices.
+    Set ``a.flat[n] = values[n]`` for all ``n`` in indices.

     Refer to `numpy.put` for full documentation.

     See Also
     --------
     numpy.put : equivalent function
+    """)

-    """))
-
-
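The `<no value>` text in these signatures is how NumPy renders its `np._NoValue` sentinel default. A small sketch of the sharing pattern used above, where one fragment is interpolated into several f-string docstrings so the rendered signatures of `max`/`min`/`prod`/`sum` stay in sync (output illustrative):

_KWARGS_REDUCE = "keepdims=<no value>, initial=<no value>, where=<no value>"

doc_max = f"""
a.max(axis=None, out=None, *, {_KWARGS_REDUCE})

Return the maximum along a given axis.
"""
print(doc_max.strip().splitlines()[0])
# a.max(axis=None, out=None, *, keepdims=<no value>, initial=<no value>, where=<no value>)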
-add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel',
+_array_method_doc('ravel', "order='C'",
     """
-    a.ravel([order])
+    a.ravel(order='C')

     Return a flattened array.

@@ -3808,13 +4129,10 @@
     See Also
     --------
     numpy.ravel : equivalent function
-
     ndarray.flat : a flat iterator on the array.
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat',
+_array_method_doc('repeat', "repeats, /, axis=None",
     """
     a.repeat(repeats, axis=None)

@@ -3825,13 +4143,12 @@
     See Also
     --------
     numpy.repeat : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape',
+_array_method_doc('reshape', "*shape, order='C', copy=None",
     """
     a.reshape(shape, /, *, order='C', copy=None)
+    a.reshape(*shape, order='C', copy=None)

     Returns an array containing the same data with a new shape.

@@ -3845,15 +4162,13 @@
     -----
     Unlike the free function `numpy.reshape`, this method on `ndarray` allows
     the elements of the shape parameter to be passed in as separate arguments.
-    For example, ``a.reshape(10, 11)`` is equivalent to
-    ``a.reshape((10, 11))``.
-
-    """))
-
+    For example, ``a.reshape(4, 2)`` is equivalent to ``a.reshape((4, 2))``.
+    """)

-add_newdoc('numpy._core.multiarray', 'ndarray', ('resize',
+_array_method_doc('resize', "*new_shape, refcheck=True",
     """
-    a.resize(new_shape, refcheck=True)
+    a.resize(new_shape, /, *, refcheck=True)
+    a.resize(*new_shape, refcheck=True)

     Change shape and size of array in-place.

@@ -3941,11 +4256,9 @@
     array([[0]])
     >>> c
     array([[0]])
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('round',
+_array_method_doc('round', "decimals=0, out=None",
     """
     a.round(decimals=0, out=None)

@@ -3956,26 +4269,22 @@
     See Also
     --------
     numpy.around : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted',
+_array_method_doc('searchsorted', "v, /, side='left', sorter=None",
     """
     a.searchsorted(v, side='left', sorter=None)

-    Find indices where elements of v should be inserted in a to maintain order.
+    Find indices where elements of `v` should be inserted in `a` to maintain order.

-    For full documentation, see `numpy.searchsorted`
+    For full documentation, see `numpy.searchsorted`.

     See Also
     --------
     numpy.searchsorted : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield',
+_array_method_doc('setfield', "val, /, dtype, offset=0",
     """
     a.setfield(val, dtype, offset=0)

@@ -4023,11 +4332,9 @@
     array([[1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.]])
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags',
+_array_method_doc('setflags', "*, write=None, align=None, uic=None",
     """
     a.setflags(write=None, align=None, uic=None)

@@ -4100,13 +4407,11 @@
     Traceback (most recent call last):
       File "<stdin>", line 1, in <module>
     ValueError: cannot set WRITEBACKIFCOPY flag to True
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('sort',
+_array_method_doc('sort', "axis=-1, kind=None, order=None, *, stable=None",
     """
-    a.sort(axis=-1, kind=None, order=None)
+    a.sort(axis=-1, kind=None, order=None, *, stable=None)

     Sort an array in-place. Refer to `numpy.sort` for full documentation.

@@ -4126,6 +4431,13 @@
     be specified as a string, and not all fields need be specified,
     but unspecified fields will still be used, in the order in which
     they come up in the dtype, to break ties.
+    stable : bool, optional
+        Sort stability. If ``True``, the returned array will maintain
+        the relative order of ``a`` values which compare as equal.
+        If ``False`` or ``None``, this is not guaranteed. Internally,
+        this option selects ``kind='stable'``. Default: ``None``.
+
+        .. versionadded:: 2.0.0

     See Also
     --------
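A quick sketch of the new `stable` keyword documented above; per the docstring it simply selects `kind='stable'` internally (values chosen so the stability is visible):

import numpy as np

a = np.array([3, 1, 2, 1])
a.sort(stable=True)   # guaranteed stable sort, same as kind='stable'
print(a)              # [1 1 2 3]

idx = np.array([3, 1, 2, 1]).argsort(stable=True)
print(idx)            # [1 3 2 0] -- the two equal 1s keep their input order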
@@ -4160,70 +4472,9 @@
     >>> a
     array([(b'c', 1), (b'a', 2)],
           dtype=[('x', 'S1'), ('y', '<i8')])

-    >>> import numpy as np
-    >>> a = np.array([3, 4, 2, 1])
-    >>> a.partition(3)
-    >>> a
-    array([2, 1, 3, 4])  # may vary
-
-    >>> a.partition((1, 3))
-    >>> a
-    array([1, 2, 3, 4])
-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('squeeze',
+_array_method_doc('squeeze', "axis=None",
     """
     a.squeeze(axis=None)

@@ -4234,13 +4485,13 @@
     See Also
     --------
     numpy.squeeze : equivalent function
+    """)

-    """))
-
+_KWARGS_STD = "*, keepdims=<no value>, where=<no value>, mean=<no value>"

-add_newdoc('numpy._core.multiarray', 'ndarray', ('std',
-    """
-    a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+_array_method_doc('std', "axis=None, dtype=None, out=None, ddof=0, **kwargs",
+    f"""
+    a.std(axis=None, dtype=None, out=None, ddof=0, {_KWARGS_STD})

     Returns the standard deviation of the array elements along given axis.

@@ -4249,28 +4500,24 @@
     See Also
     --------
     numpy.std : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('sum',
-    """
-    a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
+_array_method_doc('var', "axis=None, dtype=None, out=None, ddof=0, **kwargs",
+    f"""
+    a.var(axis=None, dtype=None, out=None, ddof=0, {_KWARGS_STD})

-    Return the sum of the array elements over the given axis.
+    Returns the variance of the array elements, along given axis.

-    Refer to `numpy.sum` for full documentation.
+    Refer to `numpy.var` for full documentation.

     See Also
     --------
-    numpy.sum : equivalent function
-
-    """))
-
+    numpy.var : equivalent function
+    """)

-add_newdoc('numpy._core.multiarray', 'ndarray', ('swapaxes',
+_array_method_doc('swapaxes', "axis1, axis2, /",
     """
-    a.swapaxes(axis1, axis2)
+    a.swapaxes(axis1, axis2, /)

     Return a view of the array with `axis1` and `axis2` interchanged.

@@ -4279,11 +4526,9 @@
     See Also
     --------
     numpy.swapaxes : equivalent function
+    """)

-    """))
-
-
-add_newdoc('numpy._core.multiarray', 'ndarray', ('take',
+_array_method_doc('take', "indices, /, axis=None, out=None, mode='raise'",
     """
     a.take(indices, axis=None, out=None, mode='raise')

@@ -4294,13 +4539,31 @@
     See Also
     --------
     numpy.take : equivalent function
+    """)

-    """))
+_array_method_doc('to_device', "device, /, *, stream=None",
+    """
+    a.to_device(device, /, *, stream=None)
+
+    For Array API compatibility. Since NumPy only supports CPU arrays, this
+    method is a no-op that returns the same array.

+    Parameters
+    ----------
+    device : "cpu"
+        Must be ``"cpu"``.
+    stream : None, optional
+        Currently unsupported.

-add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile',
+    Returns
+    -------
+    out : Self
+        Returns the same array.
+    """)
+
+_array_method_doc('tofile', "fid, /, sep='', format='%s'",
     """
-    a.tofile(fid, sep="", format="%s")
+    a.tofile(fid, /, sep='', format='%s')

     Write array to a file as text or binary (default).

@@ -4334,11 +4597,9 @@
     file, bypassing the file object's ``write`` method. As a result, tofile
     cannot be used with files objects supporting compression (e.g., GzipFile)
     or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
+ """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('tolist', +_array_method_doc('tolist', "", """ a.tolist() @@ -4346,7 +4607,7 @@ Return a copy of the array data as a (nested) Python list. Data items are converted to the nearest compatible builtin Python type, via - the `~numpy.ndarray.item` function. + the `~numpy.ndarray.item` method. If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will not be a list at all, but a simple Python scalar. @@ -4400,10 +4661,10 @@ TypeError: iteration over a 0-d array >>> a.tolist() 1 - """)) - + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', """ +_array_method_doc('tobytes', "order='C'", + """ a.tobytes(order='C') Construct Python bytes containing the raw data bytes in the array. @@ -4440,11 +4701,9 @@ True >>> x.tobytes('F') b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('trace', +_array_method_doc('trace', "offset=0, axis1=0, axis2=1, dtype=None, out=None", """ a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) @@ -4455,11 +4714,9 @@ See Also -------- numpy.trace : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose', +_array_method_doc('transpose', "*axes", """ a.transpose(*axes) @@ -4512,26 +4769,9 @@ array([1, 2, 3, 4]) >>> a.transpose() array([1, 2, 3, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('var', - """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) - - Returns the variance of the array elements, along given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var : equivalent function - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('view', +_array_method_doc('view', "*args, **kwargs", """ a.view([dtype][, type]) @@ -4540,7 +4780,7 @@ .. note:: Passing None for ``dtype`` is different from omitting the parameter, since the former invokes ``dtype(None)`` which is an alias for - ``dtype('float64')``. + ``dtype(np.float64)``. Parameters ---------- @@ -4648,8 +4888,7 @@ [[2312, 2826], [5396, 5910]]], dtype=int16) - - """)) + """) ############################################################################## @@ -4660,6 +4899,9 @@ add_newdoc('numpy._core.umath', 'frompyfunc', """ + frompyfunc(func, /, nin, nout, **kwargs) + -- + frompyfunc(func, /, nin, nout, *[, identity]) Takes an arbitrary Python function and returns a NumPy ufunc. @@ -4754,7 +4996,7 @@ add_newdoc('numpy._core.multiarray', 'get_handler_name', """ - get_handler_name(a: ndarray) -> str,None + get_handler_name(a: ndarray) -> str | None Return the name of the memory handler used by `a`. If not provided, return the name of the memory handler that will be used to allocate data for the @@ -5126,6 +5368,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduce', """ + reduce($self, array, /, axis=0, dtype=None, out=None, **kwargs) + -- + reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=, where=True) Reduces `array`'s dimension by one, by applying ufunc along one axis. @@ -5252,6 +5497,9 @@ add_newdoc('numpy._core', 'ufunc', ('accumulate', """ + accumulate($self, array, /, axis=0, dtype=None, out=None) + -- + accumulate(array, axis=0, dtype=None, out=None) Accumulate the result of applying the operator to all elements. 
@@ -5330,6 +5578,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduceat', """ + reduceat($self, array, /, indices, axis=0, dtype=None, out=None) + -- + reduceat(array, indices, axis=0, dtype=None, out=None) Performs a (local) reduce with specified slices over a single axis. @@ -5438,6 +5689,9 @@ add_newdoc('numpy._core', 'ufunc', ('outer', r""" + outer($self, A, B, /, **kwargs) + -- + outer(A, B, /, **kwargs) Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. @@ -5509,6 +5763,9 @@ add_newdoc('numpy._core', 'ufunc', ('at', """ + at($self, a, indices, b=None, /) + -- + at(a, indices, b=None, /) Performs unbuffered in place operation on operand 'a' for elements @@ -5560,6 +5817,9 @@ add_newdoc('numpy._core', 'ufunc', ('resolve_dtypes', """ + resolve_dtypes($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False) Find the dtypes NumPy will use for the operation. Both input and @@ -5607,8 +5867,8 @@ This API requires passing dtypes, define them for convenience: >>> import numpy as np - >>> int32 = np.dtype("int32") - >>> float32 = np.dtype("float32") + >>> int32 = np.dtype(np.int32) + >>> float32 = np.dtype(np.float32) The typical ufunc call does not pass an output dtype. `numpy.add` has two inputs and one output, so leave the output as ``None`` (not provided): @@ -5632,6 +5892,9 @@ add_newdoc('numpy._core', 'ufunc', ('_resolve_dtypes_and_context', """ + _resolve_dtypes_and_context($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False) See `numpy.ufunc.resolve_dtypes` for parameter information. This @@ -5655,6 +5918,9 @@ add_newdoc('numpy._core', 'ufunc', ('_get_strided_loop', """ + _get_strided_loop($self, call_info, /, *, fixed_strides=None) + -- + _get_strided_loop(call_info, /, *, fixed_strides=None) This function fills in the ``call_info`` capsule to include all @@ -5724,6 +5990,9 @@ add_newdoc('numpy._core.multiarray', 'dtype', """ + dtype(dtype, align=False, copy=False, **kwargs) + -- + dtype(dtype, align=False, copy=False, [metadata]) Create a data type object. 
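The `resolve_dtypes` example above continues in the upstream docstring roughly as sketched below: with the output dtype left as None, NumPy reports the promoted loop dtypes (int32 + float32 promotes to float64):

import numpy as np

int32 = np.dtype(np.int32)
float32 = np.dtype(np.float32)
print(np.add.resolve_dtypes((int32, float32, None)))
# (dtype('float64'), dtype('float64'), dtype('float64'))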
@@ -5826,11 +6095,11 @@
     --------
     >>> import numpy as np
-    >>> x = np.dtype('i4')
+    >>> x = np.dtype(np.int32)
     >>> x.alignment
     4

-    >>> x = np.dtype(float)
+    >>> x = np.dtype(np.float64)
     >>> x.alignment
     8

@@ -5855,11 +6124,11 @@
     --------
     >>> import numpy as np
-    >>> dt = np.dtype('i2')
+    >>> dt = np.dtype(np.int16)
     >>> dt.byteorder
     '='
     >>> # endian is not relevant for 8 bit numbers
-    >>> np.dtype('i1').byteorder
+    >>> np.dtype(np.int8).byteorder
     '|'
     >>> # or ASCII strings
     >>> np.dtype('S2').byteorder
@@ -5944,7 +6213,7 @@
     >>> import numpy as np
     >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
     >>> print(dt.fields)
-    {'name': (dtype('|S16'), 0), 'grades': (dtype(('float64',(2,))), 16)}
+    {'name': (dtype('<U16'), 0), 'grades': (dtype(('<f8', (2,))), 16)}

     >>> import numpy as np
-    >>> dt = np.dtype('i2')
+    >>> dt = np.dtype(np.int16)
     >>> dt.isbuiltin
     1
-    >>> dt = np.dtype('f8')
+    >>> dt = np.dtype(np.float64)
     >>> dt.isbuiltin
     1
-    >>> dt = np.dtype([('field1', 'f8')])
+    >>> dt = np.dtype([('field1', np.float64)])
     >>> dt.isbuiltin
     0

@@ -6079,13 +6348,13 @@
     --------
     >>> import numpy as np
-    >>> dt = np.dtype('i4')
+    >>> dt = np.dtype(np.int32)
     >>> dt.kind
     'i'
-    >>> dt = np.dtype('f8')
+    >>> dt = np.dtype(np.float64)
     >>> dt.kind
     'f'
-    >>> dt = np.dtype([('field1', 'f8')])
+    >>> dt = np.dtype([('field1', np.float64)])
     >>> dt.kind
     'V'

@@ -6251,7 +6520,7 @@
     >>> x.subdtype
     (dtype('float32'), (8,))

-    >>> x = np.dtype('i2')
+    >>> x = np.dtype(np.int16)
     >>> x.subdtype
     >>>

@@ -6273,7 +6542,7 @@
     >>> x.base
     dtype('float32')

-    >>> x = np.dtype('i2')
+    >>> x = np.dtype(np.int16)
     >>> x.base
     dtype('int16')

@@ -6290,6 +6559,9 @@
 add_newdoc('numpy._core.multiarray', 'dtype', ('newbyteorder',
     """
+    newbyteorder($self, new_order='S', /)
+    --
+
     newbyteorder(new_order='S', /)

     Return a new dtype with a different byte order.

@@ -6408,7 +6680,7 @@
 add_newdoc('numpy._core.multiarray', 'dtype', ('__gt__',
     """
-    __ge__(value, /)
+    __gt__(value, /)

     Return ``self > value``.

@@ -6446,6 +6718,9 @@
 add_newdoc('numpy._core.multiarray', 'busdaycalendar',
     """
+    busdaycalendar(weekmask='1111100', holidays=None)
+    --
+
     busdaycalendar(weekmask='1111100', holidays=None)

     A business day calendar object that efficiently stores information

@@ -6569,6 +6844,9 @@
 add_newdoc('numpy._core.multiarray', 'datetime_data',
     """
+    datetime_data(dtype, /)
+    --
+
     datetime_data(dtype, /)

     Get information about the step size of a date or time type.

@@ -6626,21 +6904,11 @@

 # Attributes

-def refer_to_array_attribute(attr, method=True):
-    docstring = """
-    Scalar {} identical to the corresponding array attribute.
-
-    Please see `ndarray.{}`.
- """ - - return attr, docstring.format("method" if method else "attribute", attr) +add_newdoc('numpy._core.numerictypes', 'generic', ('T', + """Scalar attribute identical to `ndarray.T`.""")) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('T', method=False)) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('base', method=False)) +add_newdoc('numpy._core.numerictypes', 'generic', ('base', + """Scalar attribute identical to `ndarray.base`.""")) add_newdoc('numpy._core.numerictypes', 'generic', ('data', """Pointer to start of data.""")) @@ -6677,153 +6945,12 @@ def refer_to_array_attribute(attr, method=True): # Methods -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('all')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('any')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmax')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmin')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argsort')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('astype')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('byteswap')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('choose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('clip')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('compress')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('conjugate')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('copy')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumprod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumsum')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('diagonal')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dump')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dumps')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('fill')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('flatten')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('getfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('item')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('max')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('mean')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('min')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('nonzero')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('prod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('put')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('ravel')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('repeat')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('reshape')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('resize')) - -add_newdoc('numpy._core.numerictypes', 'generic', - 
refer_to_array_attribute('round')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('searchsorted')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setflags')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sort')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('squeeze')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('std')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sum')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('swapaxes')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('take')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tofile')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tolist')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tostring')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('trace')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('transpose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('var')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('view')) - add_newdoc('numpy._core.numerictypes', 'number', ('__class_getitem__', """ - __class_getitem__(item, /) + __class_getitem__($cls, item, /) + -- + + number.__class_getitem__(item, /) Return a parametrized wrapper around the `~numpy.number` type. @@ -6914,8 +7041,67 @@ def refer_to_array_attribute(attr, method=True): """) +############################################################################## +# +# Documentation for `dtypes.*` classes +# +############################################################################## + +for _dtype_name, _signature, _sctype_name in ( + ("BoolDType", "()", "bool"), + ("Int8DType", "()", "int8"), + ("UInt8DType", "()", "uint8"), + ("Int16DType", "()", "int16"), + ("UInt16DType", "()", "uint16"), + ("Int32DType", "()", "int32"), + ("IntDType", "()", "intc"), + ("UInt32DType", "()", "uint32"), + ("UIntDType", "()", "uintc"), + ("Int64DType", "()", "int64"), + ("UInt64DType", "()", "uint64"), + ("LongLongDType", "()", "longlong"), + ("ULongLongDType", "()", "ulonglong"), + ("Float16DType", "()", "float16"), + ("Float32DType", "()", "float32"), + ("Float64DType", "()", "float64"), + ("LongDoubleDType", "()", "longdouble"), + ("Complex64DType", "()", "complex64"), + ("Complex128DType", "()", "complex128"), + ("CLongDoubleDType", "()", "clongdouble"), + ("ObjectDType", "()", "object"), + ("BytesDType", "(size, /)", "bytes_"), + ("StrDType", "(size, /)", "str_"), + ("VoidDType", "(length, /)", "void"), + ("DateTime64DType", "(unit, /)", "datetime64"), + ("TimeDelta64DType", "(unit, /)", "timedelta64"), +): + _extra_docs = "" + if _dtype_name in {"VoidDType", "DateTime64DType", "TimeDelta64DType"}: + _extra_docs = f""" + .. warning:: + ``np.dtypes.{_dtype_name}`` cannot be instantiated directly. + Use ``np.dtype("{_sctype_name}[{{unit}}]")`` instead. + """ + + add_newdoc('numpy.dtypes', _dtype_name, + f""" + {_dtype_name}{_signature} + -- + + DType class corresponding to the `numpy.{_sctype_name}` scalar type. 
+ {_extra_docs} + See `numpy.dtype` for the typical way to create dtype instances + and :ref:`arrays.dtypes` for additional information. + """) + + del _dtype_name, _signature, _sctype_name, _extra_docs # avoid namespace pollution + + add_newdoc('numpy._core.multiarray', 'StringDType', """ + StringDType(*, coerce=True, **kwargs) + -- + StringDType(*, na_object=np._NoValue, coerce=True) Create a StringDType instance. diff --git a/numpy/_core/_add_newdocs.pyi b/numpy/_core/_add_newdocs.pyi index b23c3b1adedd..2d004814fdcf 100644 --- a/numpy/_core/_add_newdocs.pyi +++ b/numpy/_core/_add_newdocs.pyi @@ -1,3 +1,2 @@ +from .function_base import add_newdoc as add_newdoc from .overrides import get_array_function_like_doc as get_array_function_like_doc - -def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ... diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 96170d80c7c9..3f9eca5e47f3 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -6,8 +6,7 @@ import os import sys -from numpy._core import dtype -from numpy._core import numerictypes as _numerictypes +from numpy._core import dtype, numerictypes as _numerictypes from numpy._core.function_base import add_newdoc ############################################################################## @@ -49,7 +48,7 @@ def type_aliases_gen(): ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), - ]) +]) def _get_platform_and_machine(): @@ -68,258 +67,240 @@ def _get_platform_and_machine(): _system, _machine = _get_platform_and_machine() _doc_alias_string = f":Alias on this platform ({_system} {_machine}):" +# docstring prefix that cpython uses to populate `__text_signature__` +_ARGUMENT_CLINIC_TEMPLATE = """{name}{signature} +-- -def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): - # note: `:field: value` is rST syntax which renders as field lists. - o = getattr(_numerictypes, obj) - - character_code = dtype(o).char - canonical_name_doc = "" if obj == o.__name__ else \ - f":Canonical name: `numpy.{obj}`\n " - if fixed_aliases: - alias_doc = ''.join(f":Alias: `numpy.{alias}`\n " - for alias in fixed_aliases) - else: - alias_doc = '' - alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n " - for (alias_type, alias, doc) in possible_aliases if alias_type is o) - - docstring = f""" - {doc.strip()} - - :Character code: ``'{character_code}'`` - {canonical_name_doc}{alias_doc} - """ - - add_newdoc('numpy._core.numerictypes', obj, docstring) - - -_bool_docstring = ( - """ - Boolean type (True or False), stored as a byte. - - .. warning:: - - The :class:`bool` type is not a subclass of the :class:`int_` type - (the :class:`bool` is not even a number type). This is different - than Python's default implementation of :class:`bool` as a - sub-class of :class:`int`. - """ -) - -add_newdoc_for_scalar_type('bool', [], _bool_docstring) - -add_newdoc_for_scalar_type('bool_', [], _bool_docstring) - -add_newdoc_for_scalar_type('byte', [], - """ - Signed integer type, compatible with C ``char``. - """) - -add_newdoc_for_scalar_type('short', [], - """ - Signed integer type, compatible with C ``short``. - """) - -add_newdoc_for_scalar_type('intc', [], - """ - Signed integer type, compatible with C ``int``. 
- """) - -# TODO: These docs probably need an if to highlight the default rather than -# the C-types (and be correct). -add_newdoc_for_scalar_type('int_', [], - """ - Default signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) - -add_newdoc_for_scalar_type('longlong', [], - """ - Signed integer type, compatible with C ``long long``. - """) - -add_newdoc_for_scalar_type('ubyte', [], - """ - Unsigned integer type, compatible with C ``unsigned char``. - """) - -add_newdoc_for_scalar_type('ushort', [], - """ - Unsigned integer type, compatible with C ``unsigned short``. - """) +{docstring}""" -add_newdoc_for_scalar_type('uintc', [], - """ - Unsigned integer type, compatible with C ``unsigned int``. - """) - -add_newdoc_for_scalar_type('uint', [], - """ - Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) - -add_newdoc_for_scalar_type('ulonglong', [], - """ - Signed integer type, compatible with C ``unsigned long long``. - """) - -add_newdoc_for_scalar_type('half', [], - """ - Half-precision floating-point number type. - """) - -add_newdoc_for_scalar_type('single', [], - """ - Single-precision floating-point number type, compatible with C ``float``. - """) +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: + # note: `:field: value` is rST syntax which renders as field lists. + cls = getattr(_numerictypes, name) + module = cls.__module__ -add_newdoc_for_scalar_type('double', [], - """ - Double-precision floating-point number type, compatible with Python - :class:`float` and C ``double``. - """) + lines_extra = [ + "", # blank line after main doc + f":Character code: ``{dtype(cls).char!r}``", + ] -add_newdoc_for_scalar_type('longdouble', [], - """ - Extended-precision floating-point number type, compatible with C - ``long double`` but not necessarily with IEEE 754 quadruple-precision. - """) + if name != cls.__name__: + lines_extra.append(f":Canonical name: `{module}.{name}`") -add_newdoc_for_scalar_type('csingle', [], - """ - Complex number type composed of two single-precision floating-point - numbers. - """) + lines_extra.extend( + f"{_doc_alias_string} `{module}.{alias}`: {doc}." + for alias_type, alias, doc in possible_aliases + if alias_type is cls + ) -add_newdoc_for_scalar_type('cdouble', [], - """ - Complex number type composed of two double-precision floating-point - numbers, compatible with Python :class:`complex`. - """) + docstring = _ARGUMENT_CLINIC_TEMPLATE.format( + name=cls.__name__, # must match the class name + signature=text_signature, + docstring="\n".join([doc.strip(), *lines_extra]), + ) + add_newdoc('numpy._core.numerictypes', name, docstring) -add_newdoc_for_scalar_type('clongdouble', [], - """ - Complex number type composed of two extended-precision floating-point - numbers. - """) -add_newdoc_for_scalar_type('object_', [], - """ - Any Python object. - """) - -add_newdoc_for_scalar_type('str_', [], - r""" - A unicode string. - - This type strips trailing null codepoints. - - >>> s = np.str_("abc\x00") - >>> s - 'abc' - - Unlike the builtin :class:`str`, this supports the - :ref:`python:bufferobjects`, exposing its contents as UCS4: - - >>> m = memoryview(np.str_("abc")) - >>> m.format - '3w' - >>> m.tobytes() - b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' - """) - -add_newdoc_for_scalar_type('bytes_', [], - r""" - A byte string. - - When used in arrays, this type strips trailing null bytes. 
- """) - -add_newdoc_for_scalar_type('void', [], - r""" - np.void(length_or_data, /, dtype=None) - - Create a new structured or unstructured void scalar. - - Parameters - ---------- - length_or_data : int, array-like, bytes-like, object - One of multiple meanings (see notes). The length or - bytes data of an unstructured void. Or alternatively, - the data to be stored in the new scalar when `dtype` - is provided. - This can be an array-like, in which case an array may - be returned. - dtype : dtype, optional - If provided the dtype of the new scalar. This dtype must - be "void" dtype (i.e. a structured or unstructured void, - see also :ref:`defining-structured-types`). - - .. versionadded:: 1.24 - - Notes - ----- - For historical reasons and because void scalars can represent both - arbitrary byte data and structured dtypes, the void constructor - has three calling conventions: - - 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five - ``\0`` bytes. The 5 can be a Python or NumPy integer. - 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. - The dtype itemsize will match the byte string length, here ``"V10"``. - 3. When a ``dtype=`` is passed the call is roughly the same as an - array creation. However, a void scalar rather than array is returned. - - Please see the examples which show all three different conventions. +for bool_name in ('bool', 'bool_'): + add_newdoc_for_scalar_type(bool_name, '(value=False, /)', """ +Boolean type (True or False), stored as a byte. - Examples - -------- - >>> np.void(5) - np.void(b'\x00\x00\x00\x00\x00') - >>> np.void(b'abcd') - np.void(b'\x61\x62\x63\x64') - >>> np.void((3.2, b'eggs'), dtype="d,S5") - np.void((3.2, b'eggs'), dtype=[('f0', '>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) - np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')]) - - """) - -add_newdoc_for_scalar_type('datetime64', [], - """ - If created from a 64-bit integer, it represents an offset from - ``1970-01-01T00:00:00``. - If created from string, the string can be in ISO 8601 date - or datetime format. +.. warning:: - When parsing a string to create a datetime object, if the string contains - a trailing timezone (A 'Z' or a timezone offset), the timezone will be - dropped and a User Warning is given. + The :class:`bool` type is not a subclass of the :class:`int_` type + (the :class:`bool` is not even a number type). This is different + than Python's default implementation of :class:`bool` as a + sub-class of :class:`int`. +""") - Datetime64 objects should be considered to be UTC and therefore have an - offset of +0000. +add_newdoc_for_scalar_type('byte', '(value=0, /)', """ +Signed integer type, compatible with C ``char``. +""") - >>> np.datetime64(10, 'Y') - np.datetime64('1980') - >>> np.datetime64('1980', 'Y') - np.datetime64('1980') - >>> np.datetime64(10, 'D') - np.datetime64('1970-01-11') +add_newdoc_for_scalar_type('short', '(value=0, /)', """ +Signed integer type, compatible with C ``short``. +""") - See :ref:`arrays.datetime` for more information. - """) +add_newdoc_for_scalar_type('intc', '(value=0, /)', """ +Signed integer type, compatible with C ``int``. +""") -add_newdoc_for_scalar_type('timedelta64', [], - """ - A timedelta stored as a 64-bit integer. +add_newdoc_for_scalar_type('long', '(value=0, /)', """ +Signed integer type, compatible with C ``long``. +""") - See :ref:`arrays.datetime` for more information. - """) +# TODO: These docs probably need an if to highlight the default rather than +# the C-types (and be correct). 
+add_newdoc_for_scalar_type('int_', '(value=0, /)', """
+Default signed integer type, 64bit on 64bit systems and 32bit on 32bit systems.
+""")
+
+add_newdoc_for_scalar_type('longlong', '(value=0, /)', """
+Signed integer type, compatible with C ``long long``.
+""")
+
+add_newdoc_for_scalar_type('ubyte', '(value=0, /)', """
+Unsigned integer type, compatible with C ``unsigned char``.
+""")
+
+add_newdoc_for_scalar_type('ushort', '(value=0, /)', """
+Unsigned integer type, compatible with C ``unsigned short``.
+""")
+
+add_newdoc_for_scalar_type('uintc', '(value=0, /)', """
+Unsigned integer type, compatible with C ``unsigned int``.
+""")
+
+add_newdoc_for_scalar_type('uint', '(value=0, /)', """
+Unsigned integer type, 64bit on 64bit systems and 32bit on 32bit systems.
+""")
+
+add_newdoc_for_scalar_type('ulong', '(value=0, /)', """
+Unsigned integer type, compatible with C ``unsigned long``.
+""")
+
+add_newdoc_for_scalar_type('ulonglong', '(value=0, /)', """
+Unsigned integer type, compatible with C ``unsigned long long``.
+""")
+
+add_newdoc_for_scalar_type('half', '(value=0, /)', """
+Half-precision floating-point number type.
+""")
+
+add_newdoc_for_scalar_type('single', '(value=0, /)', """
+Single-precision floating-point number type, compatible with C ``float``.
+""")
+
+add_newdoc_for_scalar_type('double', '(value=0, /)', """
+Double-precision floating-point number type, compatible with Python :class:`float` and C ``double``.
+""")
+
+add_newdoc_for_scalar_type('longdouble', '(value=0, /)', """
+Extended-precision floating-point number type, compatible with C ``long double``
+but not necessarily with IEEE 754 quadruple-precision.
+""")
+
+add_newdoc_for_scalar_type('csingle', '(real=0, imag=0, /)', """
+Complex number type composed of two single-precision floating-point numbers.
+""")
+
+add_newdoc_for_scalar_type('cdouble', '(real=0, imag=0, /)', """
+Complex number type composed of two double-precision floating-point numbers,
+compatible with Python :class:`complex`.
+""")
+
+add_newdoc_for_scalar_type('clongdouble', '(real=0, imag=0, /)', """
+Complex number type composed of two extended-precision floating-point numbers.
+""")
+
+add_newdoc_for_scalar_type('object_', '(value=None, /)', """
+Any Python object.
+""")
+
+add_newdoc_for_scalar_type('str_', '(value="", /, *args, **kwargs)', r"""
+A unicode string.
+
+This type strips trailing null codepoints.
+
+>>> s = np.str_("abc\x00")
+>>> s
+'abc'
+
+Unlike the builtin :class:`str`, this supports the
+:ref:`python:bufferobjects`, exposing its contents as UCS4:
+
+>>> m = memoryview(np.str_("abc"))
+>>> m.format
+'3w'
+>>> m.tobytes()
+b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
+""")
+
+add_newdoc_for_scalar_type('bytes_', '(value="", /, *args, **kwargs)', r"""
+A byte string.
+
+When used in arrays, this type strips trailing null bytes.
+""")
+
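The constructor signatures added above can be exercised directly; a small sketch with arbitrary values (standard NumPy behavior):

import numpy as np

print(np.intc(3))           # (value=0, /)        -> 3
print(np.cdouble(1, 2))     # (real=0, imag=0, /) -> (1+2j)
print(np.bool_())           # (value=False, /)    -> False
print(np.str_("abc\x00"))   # trailing null codepoint stripped -> abc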
+add_newdoc_for_scalar_type('void', '(length_or_data, /, dtype=None)', r"""
+np.void(length_or_data, /, dtype=None)
+
+Create a new structured or unstructured void scalar.
+
+Parameters
+----------
+length_or_data : int, array-like, bytes-like, object
+    One of multiple meanings (see notes). The length or
+    bytes data of an unstructured void. Or alternatively,
+    the data to be stored in the new scalar when `dtype`
+    is provided.
+    This can be an array-like, in which case an array may
+    be returned.
+dtype : dtype, optional
+    If provided the dtype of the new scalar. This dtype must
+    be "void" dtype (i.e. a structured or unstructured void,
+    see also :ref:`defining-structured-types`).
+
+    .. versionadded:: 1.24
+
+Notes
+-----
+For historical reasons and because void scalars can represent both
+arbitrary byte data and structured dtypes, the void constructor
+has three calling conventions:
+
+1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
+   ``\0`` bytes. The 5 can be a Python or NumPy integer.
+2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
+   The dtype itemsize will match the byte string length, here ``"V10"``.
+3. When a ``dtype=`` is passed the call is roughly the same as an
+   array creation. However, a void scalar rather than array is returned.
+
+Please see the examples which show all three different conventions.
+
+Examples
+--------
+>>> np.void(5)
+np.void(b'\x00\x00\x00\x00\x00')
+>>> np.void(b'abcd')
+np.void(b'\x61\x62\x63\x64')
+>>> np.void((3.2, b'eggs'), dtype="d,S5")
+np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
+>>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
+np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
+""")
+
+add_newdoc_for_scalar_type('datetime64', '(value=None, /, *args)', """
+If created from a 64-bit integer, it represents an offset from ``1970-01-01T00:00:00``.
+If created from string, the string can be in ISO 8601 date or datetime format.
+
+When parsing a string to create a datetime object, if the string contains
+a trailing timezone (a 'Z' or a timezone offset), the timezone will be
+dropped and a ``UserWarning`` is given.
+
+Datetime64 objects should be considered to be UTC and therefore have an
+offset of +0000.
+
+>>> np.datetime64(10, 'Y')
+np.datetime64('1980')
+>>> np.datetime64('1980', 'Y')
+np.datetime64('1980')
+>>> np.datetime64(10, 'D')
+np.datetime64('1970-01-11')
+
+See :ref:`arrays.datetime` for more information.
+""")
+
+add_newdoc_for_scalar_type('timedelta64', '(value=0, /, *args)', """
+A timedelta stored as a 64-bit integer.
+
+See :ref:`arrays.datetime` for more information.
+""")

 add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
     """
+    is_integer($self, /)
+    --
+
     integer.is_integer() -> bool

     Return ``True`` if the number is finite with integral value.

@@ -339,6 +320,9 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
 for float_name in ('half', 'single', 'double', 'longdouble'):
     add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
         f"""
+        as_integer_ratio($self, /)
+        --
+
         {float_name}.as_integer_ratio() -> (int, int)

         Return a pair of integers, whose ratio is exactly equal to the original

@@ -355,6 +339,9 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
     add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
         f"""
+        is_integer($self, /)
+        --
+
         {float_name}.is_integer() -> bool

         Return ``True`` if the floating point number is finite with integral

@@ -371,10 +358,14 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
         """))

 for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
-                 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'):
+                 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64',
+                 'longlong', 'ulonglong'):
     # Add negative examples for signed cases by checking typecode
     add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
         f"""
+        bit_count($self, /)
+        --
+
         {int_name}.bit_count() -> int

         Computes the number of 1-bits in the absolute value of the input.
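These scalar methods mirror their Python builtin counterparts; a quick sketch, assuming a build that carries the docstrings above:

import numpy as np

print(np.uint8(0b1011).bit_count())          # 3 -- number of 1-bits
print(np.int32(-127).bit_count())            # 7 -- counted on the absolute value
print(np.float64(2.0).is_integer())          # True
print(np.float32(0.25).as_integer_ratio())   # (1, 4)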
diff --git a/numpy/_core/_add_newdocs_scalars.pyi b/numpy/_core/_add_newdocs_scalars.pyi index 4a06c9b07d74..241f4a00bd45 100644 --- a/numpy/_core/_add_newdocs_scalars.pyi +++ b/numpy/_core/_add_newdocs_scalars.pyi @@ -1,4 +1,3 @@ -from collections.abc import Iterable from typing import Final import numpy as np @@ -8,9 +7,10 @@ _system: Final[str] = ... _machine: Final[str] = ... _doc_alias_string: Final[str] = ... _bool_docstring: Final[str] = ... +bool_name: str = ... int_name: str = ... float_name: str = ... def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ... -def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ... +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: ... def _get_platform_and_machine() -> tuple[str, str]: ... diff --git a/numpy/_core/_asarray.py b/numpy/_core/_asarray.py index 613c5cf57060..edaff5222f69 100644 --- a/numpy/_core/_asarray.py +++ b/numpy/_core/_asarray.py @@ -4,11 +4,7 @@ `require` fits this category despite its name not matching this pattern. """ from .multiarray import array, asanyarray -from .overrides import ( - array_function_dispatch, - finalize_array_function_like, - set_module, -) +from .overrides import array_function_dispatch, finalize_array_function_like, set_module __all__ = ["require"] diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index a4bee00489fb..07adc83fbcff 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -1,41 +1,41 @@ from collections.abc import Iterable -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +__all__ = ["require"] -_Requirements: TypeAlias = Literal[ +type _Requirements = Literal[ "C", "C_CONTIGUOUS", "CONTIGUOUS", "F", "F_CONTIGUOUS", "FORTRAN", "A", "ALIGNED", "W", "WRITEABLE", "O", "OWNDATA" ] -_E: TypeAlias = Literal["E", "ENSUREARRAY"] -_RequirementsWithE: TypeAlias = _Requirements | _E +type _E = Literal["E", "ENSUREARRAY"] +type _RequirementsWithE = _Requirements | _E @overload -def require( - a: _ArrayT, - dtype: None = ..., - requirements: _Requirements | Iterable[_Requirements] | None = ..., +def require[ArrayT: NDArray[Any]]( + a: ArrayT, + dtype: None = None, + requirements: _Requirements | Iterable[_Requirements] | None = None, *, - like: _SupportsArrayFunc = ... -) -> _ArrayT: ... + like: _SupportsArrayFunc | None = None +) -> ArrayT: ... @overload def require( a: object, - dtype: DTypeLike = ..., - requirements: _E | Iterable[_RequirementsWithE] = ..., + dtype: DTypeLike | None = None, + requirements: _E | Iterable[_RequirementsWithE] | None = None, *, - like: _SupportsArrayFunc = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... @overload def require( a: object, - dtype: DTypeLike = ..., - requirements: _Requirements | Iterable[_Requirements] | None = ..., + dtype: DTypeLike | None = None, + requirements: _Requirements | Iterable[_Requirements] | None = None, *, - like: _SupportsArrayFunc = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... 
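The stub changes above (`_asarray.pyi`) and in the files that follow (`_dtype.pyi`, `_exceptions.pyi`, `_internal.pyi`) replace module-level `TypeVar`/`TypeAlias` declarations with PEP 695 syntax, which requires Python 3.12+. A self-contained sketch of the same pattern (names hypothetical):

from typing import Literal

type Requirement = Literal["C", "F", "A", "W", "O"]   # PEP 695 type alias

def first[T](items: tuple[T, ...]) -> T:              # PEP 695 type parameter
    return items[0]

print(first((1, 2, 3)))   # 1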
diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi index 6cdd77b22e07..4d34ae9efb99 100644 --- a/numpy/_core/_dtype.pyi +++ b/numpy/_core/_dtype.pyi @@ -1,15 +1,11 @@ -from typing import Final, TypeAlias, TypedDict, overload, type_check_only -from typing import Literal as L - -from typing_extensions import ReadOnly, TypeVar +from typing import Final, Literal as L, TypedDict, overload, type_check_only +from typing_extensions import ReadOnly import numpy as np ### -_T = TypeVar("_T") - -_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] +type _Name = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] @type_check_only class _KindToStemType(TypedDict): @@ -52,7 +48,7 @@ def _name_get(dtype: np.dtype) -> str: ... # @overload -def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ... +def _unpack_field[T](dtype: np.dtype, offset: int, title: T) -> tuple[np.dtype, int, T]: ... @overload def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ... def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi index 02637a17b6a8..00c1cdbaa575 100644 --- a/numpy/_core/_exceptions.pyi +++ b/numpy/_core/_exceptions.pyi @@ -1,15 +1,8 @@ from collections.abc import Iterable -from typing import Any, Final, TypeVar, overload +from typing import Any, Final, overload import numpy as np from numpy import _CastingKind -from numpy._utils import set_module as set_module - -### - -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) -_ExceptionT = TypeVar("_ExceptionT", bound=Exception) ### @@ -49,7 +42,7 @@ class _ArrayMemoryError(MemoryError): def _size_to_string(num_bytes: int) -> str: ... @overload -def _unpack_tuple(tup: tuple[_T]) -> _T: ... +def _unpack_tuple[T](tup: tuple[T]) -> T: ... @overload -def _unpack_tuple(tup: _TupleT) -> _TupleT: ... -def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... +def _unpack_tuple[TupleT: tuple[()] | tuple[Any, Any, *tuple[Any, ...]]](tup: TupleT) -> TupleT: ... +def _display_as_base[ExceptionT: Exception](cls: type[ExceptionT]) -> type[ExceptionT]: ... diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index e00e1b2c1f60..7c64daf30dbd 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -365,46 +365,6 @@ def _as_parameter_(self): """ return self.data_as(ctypes.c_void_p) - # Numpy 1.21.0, 2021-05-18 - - def get_data(self): - """Deprecated getter for the `_ctypes.data` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_data" is deprecated. Use "data" instead', - DeprecationWarning, stacklevel=2) - return self.data - - def get_shape(self): - """Deprecated getter for the `_ctypes.shape` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_shape" is deprecated. Use "shape" instead', - DeprecationWarning, stacklevel=2) - return self.shape - - def get_strides(self): - """Deprecated getter for the `_ctypes.strides` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_strides" is deprecated. Use "strides" instead', - DeprecationWarning, stacklevel=2) - return self.strides - - def get_as_parameter(self): - """Deprecated getter for the `_ctypes._as_parameter_` property. - - .. deprecated:: 1.21 - """ - warnings.warn( - '"get_as_parameter" is deprecated. 
Use "_as_parameter_" instead', - DeprecationWarning, stacklevel=2, - ) - return self._as_parameter_ - def _newnames(datatype, order): """ @@ -895,6 +855,8 @@ def _ufunc_doc_signature_formatter(ufunc): Builds a signature string which resembles PEP 457 This is used to construct the first line of the docstring + + Keep in sync with `_ufunc_inspect_signature_builder`. """ # input arguments are simple @@ -933,6 +895,54 @@ def _ufunc_doc_signature_formatter(ufunc): return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})' +def _ufunc_inspect_signature_builder(ufunc): + """ + Builds a ``__signature__`` string. + + Should be kept in sync with `_ufunc_doc_signature_formatter`. + """ + + from inspect import Parameter, Signature + + params = [] + + # positional-only input parameters + if ufunc.nin == 1: + params.append(Parameter("x", Parameter.POSITIONAL_ONLY)) + else: + params.extend( + Parameter(f"x{i}", Parameter.POSITIONAL_ONLY) + for i in range(1, ufunc.nin + 1) + ) + + # for the sake of simplicity, we only consider a single output parameter + if ufunc.nout == 1: + out_default = None + else: + out_default = (None,) * ufunc.nout + params.append( + Parameter("out", Parameter.POSITIONAL_OR_KEYWORD, default=out_default), + ) + + if ufunc.signature is None: + params.append(Parameter("where", Parameter.KEYWORD_ONLY, default=True)) + else: + # NOTE: not all gufuncs support the `axis` parameters + params.append(Parameter("axes", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("axis", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("keepdims", Parameter.KEYWORD_ONLY, default=False)) + + params.extend(( + Parameter("casting", Parameter.KEYWORD_ONLY, default='same_kind'), + Parameter("order", Parameter.KEYWORD_ONLY, default='K'), + Parameter("dtype", Parameter.KEYWORD_ONLY, default=None), + Parameter("subok", Parameter.KEYWORD_ONLY, default=True), + Parameter("signature", Parameter.KEYWORD_ONLY, default=None), + )) + + return Signature(params) + + def npy_ctypes_check(cls): # determine if a class comes from ctypes, in order to work around # a bug in the buffer protocol for those objects, bpo-10746 diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 3038297b6328..179e077629b6 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -2,16 +2,12 @@ import ctypes as ct import re from collections.abc import Callable, Iterable from typing import Any, Final, Generic, Self, overload - -from typing_extensions import TypeVar, deprecated +from typing_extensions import TypeVar import numpy as np import numpy.typing as npt from numpy.ctypeslib import c_intp -_CastT = TypeVar("_CastT", bound=ct._CanCastTo) -_T_co = TypeVar("_T_co", covariant=True) -_CT = TypeVar("_CT", bound=ct._CData) _PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) ### @@ -44,28 +40,18 @@ class _ctypes(Generic[_PT_co]): def _as_parameter_(self) -> ct.c_void_p: ... # - def data_as(self, /, obj: type[_CastT]) -> _CastT: ... - def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... - def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... - - # - @deprecated('"get_data" is deprecated. Use "data" instead') - def get_data(self, /) -> _PT_co: ... - @deprecated('"get_shape" is deprecated. Use "shape" instead') - def get_shape(self, /) -> ct.Array[c_intp]: ... - @deprecated('"get_strides" is deprecated. Use "strides" instead') - def get_strides(self, /) -> ct.Array[c_intp]: ... - @deprecated('"get_as_parameter" is deprecated. 
Use "_as_parameter_" instead') - def get_as_parameter(self, /) -> ct.c_void_p: ... + def data_as[CastT: ct._CanCastTo](self, /, obj: type[CastT]) -> CastT: ... + def shape_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... + def strides_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... -class dummy_ctype(Generic[_T_co]): - _cls: type[_T_co] +class dummy_ctype[T_co]: + _cls: type[T_co] - def __init__(self, /, cls: type[_T_co]) -> None: ... + def __init__(self, /, cls: type[T_co]) -> None: ... def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __mul__(self, other: object, /) -> Self: ... - def __call__(self, /, *other: object) -> _T_co: ... + def __call__(self, /, *other: object) -> T_co: ... def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... diff --git a/numpy/_core/_machar.py b/numpy/_core/_machar.py deleted file mode 100644 index b49742a15802..000000000000 --- a/numpy/_core/_machar.py +++ /dev/null @@ -1,355 +0,0 @@ -""" -Machine arithmetic - determine the parameters of the -floating-point arithmetic system - -Author: Pearu Peterson, September 2003 - -""" -__all__ = ['MachAr'] - -from ._ufunc_config import errstate -from .fromnumeric import any - -# Need to speed this up...especially for longdouble - -# Deprecated 2021-10-20, NumPy 1.22 -class MachAr: - """ - Diagnosing machine parameters. - - Attributes - ---------- - ibeta : int - Radix in which numbers are represented. - it : int - Number of base-`ibeta` digits in the floating point mantissa M. - machep : int - Exponent of the smallest (most negative) power of `ibeta` that, - added to 1.0, gives something different from 1.0 - eps : float - Floating-point number ``beta**machep`` (floating point precision) - negep : int - Exponent of the smallest power of `ibeta` that, subtracted - from 1.0, gives something different from 1.0. - epsneg : float - Floating-point number ``beta**negep``. - iexp : int - Number of bits in the exponent (including its sign and bias). - minexp : int - Smallest (most negative) power of `ibeta` consistent with there - being no leading zeros in the mantissa. - xmin : float - Floating-point number ``beta**minexp`` (the smallest [in - magnitude] positive floating point number with full precision). - maxexp : int - Smallest (positive) power of `ibeta` that causes overflow. - xmax : float - ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] - usable floating value). - irnd : int - In ``range(6)``, information on what kind of rounding is done - in addition, and on how underflow is handled. - ngrd : int - Number of 'guard digits' used when truncating the product - of two mantissas to fit the representation. - epsilon : float - Same as `eps`. - tiny : float - An alias for `smallest_normal`, kept for backwards compatibility. - huge : float - Same as `xmax`. - precision : float - ``- int(-log10(eps))`` - resolution : float - ``- 10**(-precision)`` - smallest_normal : float - The smallest positive floating point number with 1 as leading bit in - the mantissa following IEEE-754. Same as `xmin`. 
- smallest_subnormal : float - The smallest positive floating point number with 0 as leading bit in - the mantissa following IEEE-754. - - Parameters - ---------- - float_conv : function, optional - Function that converts an integer or integer array to a float - or float array. Default is `float`. - int_conv : function, optional - Function that converts a float or float array to an integer or - integer array. Default is `int`. - float_to_float : function, optional - Function that converts a float array to float. Default is `float`. - Note that this does not seem to do anything useful in the current - implementation. - float_to_str : function, optional - Function that converts a single float to a string. Default is - ``lambda v:'%24.16e' %v``. - title : str, optional - Title that is printed in the string representation of `MachAr`. - - See Also - -------- - finfo : Machine limits for floating point types. - iinfo : Machine limits for integer types. - - References - ---------- - .. [1] Press, Teukolsky, Vetterling and Flannery, - "Numerical Recipes in C++," 2nd ed, - Cambridge University Press, 2002, p. 31. - - """ - - def __init__(self, float_conv=float, int_conv=int, - float_to_float=float, - float_to_str=lambda v: f'{v:24.16e}', - title='Python floating point number'): - """ - - float_conv - convert integer to float (array) - int_conv - convert float (array) to integer - float_to_float - convert float array to float - float_to_str - convert array float to str - title - description of used floating point numbers - - """ - # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the running arch. - with errstate(under='ignore'): - self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) - - def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): - max_iterN = 10000 - msg = "Did not converge after %d tries with %s" - one = float_conv(1) - two = one + one - zero = one - one - - # Do we really need to do this? Aren't they 2 and 2.0? 
- # Determine ibeta and beta - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - b = one - for _ in range(max_iterN): - b = b + b - temp = a + b - itemp = int_conv(temp - a) - if any(itemp != 0): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - ibeta = itemp - beta = float_conv(ibeta) - - # Determine it and irnd - it = -1 - b = one - for _ in range(max_iterN): - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - - betah = beta / two - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - temp = a + betah - irnd = 0 - if any(temp - a != zero): - irnd = 1 - tempa = a + beta - temp = tempa + betah - if irnd == 0 and any(temp - tempa != zero): - irnd = 2 - - # Determine negep and epsneg - negep = it + 3 - betain = one / beta - a = one - for i in range(negep): - a = a * betain - b = a - for _ in range(max_iterN): - temp = one - a - if any(temp - one != zero): - break - a = a * beta - negep = negep - 1 - # Prevent infinite loop on PPC with gcc 4.0: - if negep < 0: - raise RuntimeError("could not determine machine tolerance " - "for 'negep', locals() -> %s" % (locals())) - else: - raise RuntimeError(msg % (_, one.dtype)) - negep = -negep - epsneg = a - - # Determine machep and eps - machep = - it - 3 - a = b - - for _ in range(max_iterN): - temp = one + a - if any(temp - one != zero): - break - a = a * beta - machep = machep + 1 - else: - raise RuntimeError(msg % (_, one.dtype)) - eps = a - - # Determine ngrd - ngrd = 0 - temp = one + eps - if irnd == 0 and any(temp * one - one != zero): - ngrd = 1 - - # Determine iexp - i = 0 - k = 1 - z = betain - t = one + eps - nxres = 0 - for _ in range(max_iterN): - y = z - z = y * y - a = z * one # Check here for underflow - temp = z * t - if any(a + a == zero) or any(abs(z) >= y): - break - temp1 = temp * betain - if any(temp1 * beta == z): - break - i = i + 1 - k = k + k - else: - raise RuntimeError(msg % (_, one.dtype)) - if ibeta != 10: - iexp = i + 1 - mx = k + k - else: - iexp = 2 - iz = ibeta - while k >= iz: - iz = iz * ibeta - iexp = iexp + 1 - mx = iz + iz - 1 - - # Determine minexp and xmin - for _ in range(max_iterN): - xmin = y - y = y * betain - a = y * one - temp = y * t - if any((a + a) != zero) and any(abs(y) < xmin): - k = k + 1 - temp1 = temp * betain - if any(temp1 * beta == y) and any(temp != y): - nxres = 3 - xmin = y - break - else: - break - else: - raise RuntimeError(msg % (_, one.dtype)) - minexp = -k - - # Determine maxexp, xmax - if mx <= k + k - 3 and ibeta != 10: - mx = mx + mx - iexp = iexp + 1 - maxexp = mx + minexp - irnd = irnd + nxres - if irnd >= 2: - maxexp = maxexp - 2 - i = maxexp + minexp - if ibeta == 2 and not i: - maxexp = maxexp - 1 - if i > 20: - maxexp = maxexp - 1 - if any(a != y): - maxexp = maxexp - 2 - xmax = one - epsneg - if any(xmax * one != xmax): - xmax = one - beta * epsneg - xmax = xmax / (xmin * beta * beta * beta) - i = maxexp + minexp + 3 - for j in range(i): - if ibeta == 2: - xmax = xmax + xmax - else: - xmax = xmax * beta - - smallest_subnormal = abs(xmin / beta ** (it)) - - self.ibeta = ibeta - self.it = it - self.negep = negep - self.epsneg = float_to_float(epsneg) - self._str_epsneg = float_to_str(epsneg) - self.machep = machep - 
self.eps = float_to_float(eps) - self._str_eps = float_to_str(eps) - self.ngrd = ngrd - self.iexp = iexp - self.minexp = minexp - self.xmin = float_to_float(xmin) - self._str_xmin = float_to_str(xmin) - self.maxexp = maxexp - self.xmax = float_to_float(xmax) - self._str_xmax = float_to_str(xmax) - self.irnd = irnd - - self.title = title - # Commonly used parameters - self.epsilon = self.eps - self.tiny = self.xmin - self.huge = self.xmax - self.smallest_normal = self.xmin - self._str_smallest_normal = float_to_str(self.xmin) - self.smallest_subnormal = float_to_float(smallest_subnormal) - self._str_smallest_subnormal = float_to_str(smallest_subnormal) - - import math - self.precision = int(-math.log10(float_to_float(self.eps))) - ten = two + two + two + two + two - resolution = ten ** (-self.precision) - self.resolution = float_to_float(resolution) - self._str_resolution = float_to_str(resolution) - - def __str__(self): - fmt = ( - 'Machine parameters for %(title)s\n' - '---------------------------------------------------------------------\n' - 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' - 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' - 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' - 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' - 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' - 'smallest_normal=%(smallest_normal)s ' - 'smallest_subnormal=%(smallest_subnormal)s\n' - '---------------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - -if __name__ == '__main__': - print(MachAr()) diff --git a/numpy/_core/_machar.pyi b/numpy/_core/_machar.pyi deleted file mode 100644 index 02637a17b6a8..000000000000 --- a/numpy/_core/_machar.pyi +++ /dev/null @@ -1,55 +0,0 @@ -from collections.abc import Iterable -from typing import Any, Final, TypeVar, overload - -import numpy as np -from numpy import _CastingKind -from numpy._utils import set_module as set_module - -### - -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) -_ExceptionT = TypeVar("_ExceptionT", bound=Exception) - -### - -class UFuncTypeError(TypeError): - ufunc: Final[np.ufunc] - def __init__(self, /, ufunc: np.ufunc) -> None: ... - -class _UFuncNoLoopError(UFuncTypeError): - dtypes: tuple[np.dtype, ...] - def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... - -class _UFuncBinaryResolutionError(_UFuncNoLoopError): - dtypes: tuple[np.dtype, np.dtype] - def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... - -class _UFuncCastingError(UFuncTypeError): - casting: Final[_CastingKind] - from_: Final[np.dtype] - to: Final[np.dtype] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ... - -class _UFuncInputCastingError(_UFuncCastingError): - in_i: Final[int] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... - -class _UFuncOutputCastingError(_UFuncCastingError): - out_i: Final[int] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... - -class _ArrayMemoryError(MemoryError): - shape: tuple[int, ...] - dtype: np.dtype - def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ... - @property - def _total_size(self) -> int: ... - @staticmethod - def _size_to_string(num_bytes: int) -> str: ... 
- -@overload -def _unpack_tuple(tup: tuple[_T]) -> _T: ... -@overload -def _unpack_tuple(tup: _TupleT) -> _TupleT: ... -def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index 21ad7900016b..1c29831bca20 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -9,9 +9,7 @@ from contextlib import nullcontext import numpy as np -from numpy._core import multiarray as mu -from numpy._core import numerictypes as nt -from numpy._core import umath as um +from numpy._core import multiarray as mu, numerictypes as nt, umath as um from numpy._core.multiarray import asanyarray from numpy._globals import _NoValue @@ -121,7 +119,7 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): - warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) # Cast bool, unsigned int, and int to float64 by default if dtype is None: @@ -187,15 +185,14 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # Compute sum of squared deviations from mean # Note that x may not be inexact and that we need it to be an array, # not a scalar. - x = asanyarray(arr - arrmean) - + x = um.subtract(arr, arrmean, out=...) if issubclass(arr.dtype.type, (nt.floating, nt.integer)): - x = um.multiply(x, x, out=x) + x = um.square(x, out=x) # Fast-paths for built-in complex types - elif x.dtype in _complex_to_float: - xv = x.view(dtype=(_complex_to_float[x.dtype], (2,))) - um.multiply(xv, xv, out=xv) - x = um.add(xv[..., 0], xv[..., 1], out=x.real).real + elif (_float_dtype := _complex_to_float.get(x.dtype)) is not None: + xv = x.view(dtype=(_float_dtype, (2,))) + um.square(xv, out=xv) + x = um.add(xv[..., 0], xv[..., 1], out=x.real) # Most general case; includes handling object arrays containing imaginary # numbers and complex types with non-native byteorder else: diff --git a/numpy/_core/_methods.pyi b/numpy/_core/_methods.pyi index 3c80683f003b..651c78d3530b 100644 --- a/numpy/_core/_methods.pyi +++ b/numpy/_core/_methods.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Any, Concatenate, TypeAlias +from typing import Any, Concatenate import numpy as np @@ -7,7 +7,7 @@ from . import _exceptions as _exceptions ### -_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any] +type _Reduce2 = Callable[Concatenate[object, ...], Any] ### diff --git a/numpy/_core/_simd.pyi b/numpy/_core/_simd.pyi index 70bb7077797e..0ba7d78eeb87 100644 --- a/numpy/_core/_simd.pyi +++ b/numpy/_core/_simd.pyi @@ -8,6 +8,13 @@ AVX2: ModuleType | None = ... AVX512F: ModuleType | None = ... AVX512_SKX: ModuleType | None = ... +# NOTE: these 2 are only defined on systems with an arm processor +ASIMD: ModuleType | None = ... +NEON: ModuleType | None = ... + +# NOTE: This is only defined on systems with an riscv64 processor. +RVV: ModuleType | None = ... + baseline: ModuleType | None = ... @type_check_only @@ -17,6 +24,9 @@ class SimdTargets(TypedDict): FMA3: ModuleType | None AVX512F: ModuleType | None AVX512_SKX: ModuleType | None + ASIMD: ModuleType | None + NEON: ModuleType | None + RVV: ModuleType | None baseline: ModuleType | None targets: SimdTargets = ... 
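[Editorial note] The `_var` rewrite above replaces `x * x` with `um.square` and keys the complex fast path on a dict lookup, but the underlying trick is unchanged: for complex z, abs(z)**2 == z.real**2 + z.imag**2, computed without complex arithmetic by reinterpreting the complex buffer as (real, imag) float pairs. A minimal doctest-style sketch of that trick using only public API (the real code works through the `out=...` plumbing of the private `um` module):

    >>> import numpy as np
    >>> z = np.array([3 + 4j, 1 - 2j])                   # complex128
    >>> zv = z.view(np.float64).reshape(z.shape + (2,))  # (re, im) pairs, no copy
    >>> _ = np.square(zv, out=zv)                        # squares z's buffer in place
    >>> zv[..., 0] + zv[..., 1]                          # re**2 + im**2 == |z|**2
    array([25.,  5.])

Because `zv` is a view, `z` itself is clobbered by the in-place square; `_var` can afford that since it operates on the already-materialized deviations `arr - arrmean`.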
diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index de6c30953e91..51c8e6ca2677 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -36,6 +36,7 @@ for _abstract_type_name in _abstract_type_names: allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name) + del _abstract_type_name for k, v in typeinfo.items(): if k.startswith("NPY_") and v not in c_names_dict: @@ -44,6 +45,8 @@ concrete_type = v.type allTypes[k] = concrete_type sctypeDict[k] = concrete_type + del concrete_type + del k, v _aliases = { "double": "float64", @@ -60,6 +63,7 @@ for k, v in _aliases.items(): sctypeDict[k] = allTypes[v] allTypes[k] = allTypes[v] + del k, v # extra aliases are added only to `sctypeDict` # to support dtype name access, such as`np.dtype("float")` @@ -76,18 +80,21 @@ for k, v in _extra_aliases.items(): sctypeDict[k] = allTypes[v] + del k, v # include extended precision sized aliases for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]: - longdouble_type: type = allTypes[full_name] + longdouble_type = allTypes[full_name] - bits: int = dtype(longdouble_type).itemsize * 8 - base_name: str = "complex" if is_complex else "float" - extended_prec_name: str = f"{base_name}{bits}" + bits = dtype(longdouble_type).itemsize * 8 + base_name = "complex" if is_complex else "float" + extended_prec_name = f"{base_name}{bits}" if extended_prec_name not in allTypes: sctypeDict[extended_prec_name] = longdouble_type allTypes[extended_prec_name] = longdouble_type + del is_complex, full_name, longdouble_type, bits, base_name, extended_prec_name + #################### # Building `sctypes` @@ -110,10 +117,15 @@ ]: if issubclass(concrete_type, abstract_type): sctypes[type_group].add(concrete_type) + del type_group, abstract_type break + del type_info, concrete_type + # sort sctype groups by bitsize for sctype_key in sctypes.keys(): sctype_list = list(sctypes[sctype_key]) sctype_list.sort(key=lambda x: dtype(x).itemsize) sctypes[sctype_key] = sctype_list + + del sctype_key, sctype_list diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index 3c9dac7a1202..c7efe989caa5 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -1,19 +1,8 @@ from collections.abc import Collection -from typing import Final, TypeAlias, TypedDict, type_check_only -from typing import Literal as L +from typing import Final, Literal as L, TypedDict, type_check_only import numpy as np -__all__ = ( - "_abstract_type_names", - "_aliases", - "_extra_aliases", - "allTypes", - "c_names_dict", - "sctypeDict", - "sctypes", -) - sctypeDict: Final[dict[str, type[np.generic]]] allTypes: Final[dict[str, type[np.generic]]] @@ -46,7 +35,7 @@ class _CNamesDict(TypedDict): c_names_dict: Final[_CNamesDict] -_AbstractTypeName: TypeAlias = L[ +type _AbstractTypeName = L[ "generic", "flexible", "character", diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index 24abecd20652..6a7476670d95 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -57,6 +57,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): seterrcall : Set a callback function for the 'call' mode. geterr, geterrcall, errstate + Notes ----- The floating-point exceptions are defined in the IEEE 754 standard [1]_: @@ -68,6 +69,8 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): - Invalid operation: result is not an expressible number, typically indicates that a NaN was produced. 
+ **Concurrency note:** see :ref:`fp_error_handling` + .. [1] https://en.wikipedia.org/wiki/IEEE_754 Examples @@ -127,6 +130,8 @@ def geterr(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- >>> import numpy as np @@ -172,6 +177,10 @@ def setbufsize(size): bufsize : int Previous size of ufunc buffer in bytes. + Notes + ----- + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- When exiting a `numpy.errstate` context manager the bufsize is restored: @@ -187,6 +196,8 @@ def setbufsize(size): 8192 """ + if size < 0: + raise ValueError("buffer size must be non-negative") old = _get_extobj_dict()["bufsize"] extobj = _make_extobj(bufsize=size) _extobj_contextvar.set(extobj) @@ -203,6 +214,12 @@ def getbufsize(): getbufsize : int Size of ufunc buffer in bytes. + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + + Examples -------- >>> import numpy as np @@ -254,6 +271,11 @@ def seterrcall(func): -------- seterr, geterr, geterrcall + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- Callback upon error: @@ -329,6 +351,8 @@ def geterrcall(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np @@ -397,6 +421,8 @@ class errstate: For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index 1a6613154072..039aa1d51223 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,13 +1,20 @@ -from collections.abc import Callable -from typing import Any, Literal, TypeAlias, TypedDict, type_check_only - from _typeshed import SupportsWrite +from collections.abc import Callable +from types import TracebackType +from typing import Any, Final, Literal, TypedDict, type_check_only -from numpy import errstate as errstate +__all__ = [ + "seterr", + "geterr", + "setbufsize", + "getbufsize", + "seterrcall", + "geterrcall", + "errstate", +] -_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrFunc: TypeAlias = Callable[[str, int], Any] -_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str] +type _ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +type _ErrCall = Callable[[str, int], Any] | SupportsWrite[str] @type_check_only class _ErrDict(TypedDict): @@ -16,17 +23,45 @@ class _ErrDict(TypedDict): under: _ErrKind invalid: _ErrKind +### + +class _unspecified: ... + +_Unspecified: Final[_unspecified] + +class errstate: + __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" + + def __init__( + self, + /, + *, + call: _ErrCall | _unspecified = ..., # = _Unspecified + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, + ) -> None: ... + def __call__[FuncT: Callable[..., object]](self, /, func: FuncT) -> FuncT: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + /, + ) -> None: ... 
+ def seterr( - all: _ErrKind | None = ..., - divide: _ErrKind | None = ..., - over: _ErrKind | None = ..., - under: _ErrKind | None = ..., - invalid: _ErrKind | None = ..., + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, ) -> _ErrDict: ... def geterr() -> _ErrDict: ... def setbufsize(size: int) -> int: ... def getbufsize() -> int: ... def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ... def geterrcall() -> _ErrCall | None: ... - -# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings` diff --git a/numpy/_core/_umath_tests.pyi b/numpy/_core/_umath_tests.pyi new file mode 100644 index 000000000000..696cec3b755e --- /dev/null +++ b/numpy/_core/_umath_tests.pyi @@ -0,0 +1,47 @@ +# undocumented internal testing module for ufunc features, defined in +# numpy/_core/src/umath/_umath_tests.c.src + +from typing import Final, Literal as L, TypedDict, type_check_only + +import numpy as np +from numpy._typing import _GUFunc_Nin2_Nout1, _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1 + +@type_check_only +class _TestDispatchResult(TypedDict): + func: str # e.g. 'func_AVX2' + var: str # e.g. 'var_AVX2' + func_xb: str # e.g. 'func_AVX2' + var_xb: str # e.g. 'var_AVX2' + all: list[str] # e.g. ['func_AVX2', 'func_SSE41', 'func'] + +### + +# undocumented +def test_signature( + nin: int, nout: int, signature: str, / +) -> tuple[ + L[0, 1], # core_enabled (0 for scalar ufunc; 1 for generalized ufunc) + tuple[int, ...] | None, # core_num_dims + tuple[int, ...] | None, # core_dim_ixs + tuple[int, ...] | None, # core_dim_flags + tuple[int, ...] | None, # core_dim_sizes +]: ... + +# undocumented +def test_dispatch() -> _TestDispatchResult: ... + +# undocumented ufuncs and gufuncs +always_error: Final[_UFunc_Nin2_Nout1[L["always_error"], L[1], None]] = ... +always_error_unary: Final[_UFunc_Nin1_Nout1[L["always_error_unary"], L[1], None]] = ... +always_error_gufunc: Final[_GUFunc_Nin2_Nout1[L["always_error_gufunc"], L[1], None, L["(i),()->()"]]] = ... +inner1d: Final[_GUFunc_Nin2_Nout1[L["inner1d"], L[2], None, L["(i),(i)->()"]]] = ... +innerwt: Final[np.ufunc] = ... # we have no specialized type for 3->1 gufuncs +matrix_multiply: Final[_GUFunc_Nin2_Nout1[L["matrix_multiply"], L[3], None, L["(m,n),(n,p)->(m,p)"]]] = ... +matmul: Final[_GUFunc_Nin2_Nout1[L["matmul"], L[3], None, L["(m?,n),(n,p?)->(m?,p?)"]]] = ... +euclidean_pdist: Final[_GUFunc_Nin2_Nout1[L["euclidean_pdist"], L[2], None, L["(n,d)->(p)"]]] = ... +cumsum: Final[np.ufunc] = ... # we have no specialized type for 1->1 gufuncs +inner1d_no_doc: Final[_GUFunc_Nin2_Nout1[L["inner1d_no_doc"], L[2], None, L["(i),(i)->()"]]] = ... +cross1d: Final[_GUFunc_Nin2_Nout1[L["cross1d"], L[2], None, L["(3),(3)->(3)"]]] = ... +_pickleable_module_global_ufunc: Final[np.ufunc] = ... # 0->0 ufunc; segfaults if called +indexed_negative: Final[_UFunc_Nin1_Nout1[L["indexed_negative"], L[0], L[0]]] = ... # ntypes=0; can't be called +conv1d_full: Final[_GUFunc_Nin2_Nout1[L["conv1d_full"], L[1], None, L["(m),(n)->(p)"]]] = ... diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 2a684280610b..96c17285bb3d 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -248,11 +248,15 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, -------- get_printoptions, printoptions, array2string + Notes ----- - `formatter` is always reset with a call to `set_printoptions`. 
- Use `printoptions` as a context manager to set the values temporarily. + * ``formatter`` is always reset with a call to `set_printoptions`. + * Use `printoptions` as a context manager to set the values temporarily. + * These print options apply only to NumPy ndarrays, not to scalars. + + **Concurrency note:** see :ref:`text_formatting_options` Examples -------- @@ -352,6 +356,12 @@ def get_printoptions(): For a full description of these options, see `set_printoptions`. + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. + + **Concurrency note:** see :ref:`text_formatting_options` + See Also -------- set_printoptions, printoptions @@ -410,6 +420,12 @@ def printoptions(*args, **kwargs): -------- set_printoptions, get_printoptions + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. + + **Concurrency note:** see :ref:`text_formatting_options` + """ token = _set_printoptions(*args, **kwargs) @@ -610,18 +626,18 @@ def _array2string(a, options, separator=' ', prefix=""): def _array2string_dispatcher( a, max_line_width=None, precision=None, suppress_small=None, separator=None, prefix=None, - style=None, formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix=None, - *, legacy=None): + legacy=None): return (a,) @array_function_dispatch(_array2string_dispatcher, module='numpy') def array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix="", - style=np._NoValue, formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix="", - *, legacy=None): + legacy=None): """ Return a string representation of an array. @@ -654,10 +670,6 @@ def array2string(a, max_line_width=None, precision=None, wrapping is forced at the column ``max_line_width - len(suffix)``. It should be noted that the content of prefix and suffix strings are not included in the output. - style : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.14.0 formatter : dict of callables, optional If not None, the keys should indicate the type(s) that the respective formatting function applies to. Callables should return a string. 
@@ -777,16 +789,8 @@ def array2string(a, max_line_width=None, precision=None, options.update(overrides) if options['legacy'] <= 113: - if style is np._NoValue: - style = repr - if a.shape == () and a.dtype.names is None: - return style(a.item()) - elif style is not np._NoValue: - # Deprecation 11-9-2017 v1.14 - warnings.warn("'style' argument is deprecated and no longer functional" - " except in 1.13 'legacy' mode", - DeprecationWarning, stacklevel=2) + return repr(a.item()) if options['legacy'] > 113: options['linewidth'] -= len(suffix) diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index fec03a6f265c..167cc3f3a097 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -3,21 +3,9 @@ from collections.abc import Callable # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager -from typing import ( - Any, - Final, - Literal, - SupportsIndex, - TypeAlias, - TypedDict, - overload, - type_check_only, -) - -from typing_extensions import deprecated +from typing import Any, Final, Literal, SupportsIndex, TypedDict, type_check_only import numpy as np -from numpy._globals import _NoValueType from numpy._typing import NDArray, _CharLike_co, _FloatLike_co __all__ = [ @@ -33,12 +21,12 @@ __all__ = [ ### -_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"] -_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] -_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] -_Sign: TypeAlias = Literal["-", "+", " "] -_Trim: TypeAlias = Literal["k", ".", "0", "-"] -_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str] +type _FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +type _LegacyNoStyle = Literal["1.21", "1.25", "2.1", False] +type _Legacy = Literal["1.13", _LegacyNoStyle] +type _Sign = Literal["-", "+", " "] +type _Trim = Literal["k", ".", "0", "-"] +type _ReprFunc = Callable[[NDArray[Any]], str] @type_check_only class _FormatDict(TypedDict, total=False): @@ -78,14 +66,14 @@ class _FormatOptions(TypedDict): __docformat__: Final = "restructuredtext" # undocumented def set_printoptions( - precision: SupportsIndex | None = ..., - threshold: int | None = ..., - edgeitems: int | None = ..., - linewidth: int | None = ..., - suppress: bool | None = ..., - nanstr: str | None = ..., - infstr: str | None = ..., - formatter: _FormatDict | None = ..., + precision: SupportsIndex | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + linewidth: int | None = None, + suppress: bool | None = None, + nanstr: str | None = None, + infstr: str | None = None, + formatter: _FormatDict | None = None, sign: _Sign | None = None, floatmode: _FloatMode | None = None, *, @@ -95,7 +83,6 @@ def set_printoptions( def get_printoptions() -> _FormatOptions: ... # public numpy export -@overload # no style def array2string( a: NDArray[Any], max_line_width: int | None = None, @@ -103,123 +90,48 @@ def array2string( suppress_small: bool | None = None, separator: str = " ", prefix: str = "", - style: _NoValueType = ..., - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", *, - legacy: _Legacy | None = None, -) -> str: ... 
-@overload # style= (positional), legacy="1.13" -def array2string( - a: NDArray[Any], - max_line_width: int | None, - precision: SupportsIndex | None, - suppress_small: bool | None, - separator: str, - prefix: str, - style: _ReprFunc, formatter: _FormatDict | None = None, threshold: int | None = None, edgeitems: int | None = None, sign: _Sign | None = None, floatmode: _FloatMode | None = None, suffix: str = "", - *, - legacy: Literal["1.13"], -) -> str: ... -@overload # style= (keyword), legacy="1.13" -def array2string( - a: NDArray[Any], - max_line_width: int | None = None, - precision: SupportsIndex | None = None, - suppress_small: bool | None = None, - separator: str = " ", - prefix: str = "", - *, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - legacy: Literal["1.13"], -) -> str: ... -@overload # style= (positional), legacy!="1.13" -@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") -def array2string( - a: NDArray[Any], - max_line_width: int | None, - precision: SupportsIndex | None, - suppress_small: bool | None, - separator: str, - prefix: str, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - *, - legacy: _LegacyNoStyle | None = None, -) -> str: ... -@overload # style= (keyword), legacy="1.13" -@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") -def array2string( - a: NDArray[Any], - max_line_width: int | None = None, - precision: SupportsIndex | None = None, - suppress_small: bool | None = None, - separator: str = " ", - prefix: str = "", - *, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - legacy: _LegacyNoStyle | None = None, + legacy: _Legacy | None = None, ) -> str: ... def format_float_scientific( x: _FloatLike_co, - precision: int | None = ..., - unique: bool = ..., + precision: int | None = None, + unique: bool = True, trim: _Trim = "k", - sign: bool = ..., - pad_left: int | None = ..., - exp_digits: int | None = ..., - min_digits: int | None = ..., + sign: bool = False, + pad_left: int | None = None, + exp_digits: int | None = None, + min_digits: int | None = None, ) -> str: ... def format_float_positional( x: _FloatLike_co, - precision: int | None = ..., - unique: bool = ..., - fractional: bool = ..., + precision: int | None = None, + unique: bool = True, + fractional: bool = True, trim: _Trim = "k", - sign: bool = ..., - pad_left: int | None = ..., - pad_right: int | None = ..., - min_digits: int | None = ..., + sign: bool = False, + pad_left: int | None = None, + pad_right: int | None = None, + min_digits: int | None = None, ) -> str: ... def array_repr( arr: NDArray[Any], - max_line_width: int | None = ..., - precision: SupportsIndex | None = ..., - suppress_small: bool | None = ..., + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, ) -> str: ... 
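[Editorial note] With the `style` overloads deleted, a single stub remains and everything after `prefix` is keyword-only, mirroring the runtime signature change in `arrayprint.py`; a stale positional `style` argument now fails at the call site instead of emitting a DeprecationWarning. Typical keyword-only usage is unaffected:

    >>> import numpy as np
    >>> a = np.arange(6).reshape(2, 3)
    >>> np.array2string(a, separator=", ", formatter={"int": lambda x: f"{x:02d}"})
    '[[00, 01, 02],\n [03, 04, 05]]'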
def array_str( a: NDArray[Any], - max_line_width: int | None = ..., - precision: SupportsIndex | None = ..., - suppress_small: bool | None = ..., + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, ) -> str: ... def printoptions( precision: SupportsIndex | None = ..., diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index d448df066a19..b058875d0455 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -78,5 +78,10 @@ # Version 19 (NumPy 2.1.0) Only header additions # Version 19 (NumPy 2.2.0) No change 0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 -# Version 19 (NumPy 2.3.0) +# Version 20 (NumPy 2.3.0) 0x00000014 = e56b74d32a934d085e7c3414cb9999b8, +# Version 21 (NumPy 2.4.0) +# Add 'same_value' casting, header additions. +# General loop registration for ufuncs, sort, and argsort +# Version 21 (NumPy 2.5.0) No change +0x00000015 = fbd24fc5b2ba4f7cd3606ec6128de7a5 diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index caeaf7a08532..1087d176816b 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -17,11 +17,11 @@ def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', '..', 'distutils', 'conv_template.py' + '..', '..', '_build_utils', 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path @@ -62,7 +62,7 @@ def get_processor(): join('multiarray', 'descriptor.c'), join('multiarray', 'dlpack.c'), join('multiarray', 'dtypemeta.c'), - join('multiarray', 'einsum.c.src'), + join('multiarray', 'einsum.cpp'), join('multiarray', 'public_dtype_api.c'), join('multiarray', 'flagsobject.c'), join('multiarray', 'getset.c'), @@ -466,8 +466,7 @@ def _key(x): def merge_api_dicts(dicts): ret = {} for d in dicts: - for k, v in d.items(): - ret[k] = v + ret.update(d) return ret diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py index dc11bcd2c272..23d678872ca4 100644 --- a/numpy/_core/code_generators/generate_numpy_api.py +++ b/numpy/_core/code_generators/generate_numpy_api.py @@ -157,6 +157,12 @@ return 0; } +#if (SWIG_VERSION < 0x040400) +#define _RETURN_VALUE NULL +#else +#define _RETURN_VALUE 0 +#endif + #define import_array() { \ if (_import_array() < 0) { \ PyErr_Print(); \ @@ -164,7 +170,7 @@ PyExc_ImportError, \ "numpy._core.multiarray failed to import" \ ); \ - return NULL; \ + return _RETURN_VALUE; \ } \ } diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index b366dc99dfb8..c2b471c71757 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -19,7 +19,7 @@ def get_annotations(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import numpy # (numpy is not yet built) genapi_py = os.path.join(os.path.dirname(__file__), 'genapi.py') spec = importlib.util.spec_from_file_location('conv_template', genapi_py) @@ -412,7 +412,7 @@ def get_annotations(): } ufunc_types_api = { - 'PyUFunc_Type': (0,) + 'PyUFunc_Type': (0,), } ufunc_funcs_api = { @@ -468,6 +468,8 @@ def get_annotations(): 'PyUFunc_AddPromoter': (44, MinVersion("2.0")), 
'PyUFunc_AddWrappingLoop': (45, MinVersion("2.0")), 'PyUFunc_GiveFloatingpointErrors': (46, MinVersion("2.0")), + # End 2.0 API + 'PyUFunc_AddLoopsFromSpecs': (47, MinVersion("2.4")), } # List of all the dicts which define the C API diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 5d3ba73c92f0..1630a9d6f136 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -332,7 +332,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arcsinh', """ - Inverse hyperbolic sine element-wise. + Inverse hyperbolic sine, element-wise. Parameters ---------- @@ -534,7 +534,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arctanh', """ - Inverse hyperbolic tangent element-wise. + Inverse hyperbolic tangent, element-wise. Parameters ---------- @@ -917,7 +917,7 @@ def add_newdoc(place, name, doc): array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) >>> >>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -1145,7 +1145,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1506,7 +1506,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 @@ -1545,7 +1545,7 @@ def add_newdoc(place, name, doc): ------- out : bool or ndarray of bool Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1957,7 +1957,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1994,7 +1994,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -2963,7 +2963,7 @@ def add_newdoc(place, name, doc): matrix-vector product is defined as: .. math:: - \\mathbf{A} \\cdot \\mathbf{b} = \\sum_{j=0}^{n-1} A_{ij} v_j + \\mathbf{A} \\cdot \\mathbf{v} = \\sum_{j=0}^{n-1} A_{ij} v_j where the sum is over the last dimensions in ``x1`` and ``x2`` (unless ``axes`` is specified). (For a matrix-vector product with the @@ -3030,7 +3030,7 @@ def add_newdoc(place, name, doc): vector-matrix product is defined as: .. 
math:: - \\mathbf{b} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} + \\mathbf{v} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} where the sum is over the last dimension of ``x1`` and the one-but-last dimensions in ``x2`` (unless `axes` is specified) and where @@ -3174,7 +3174,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'negative', """ - Numerical negative, element-wise. + Numerical negation, element-wise. Parameters ---------- @@ -3255,7 +3255,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -3373,9 +3373,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. - >>> np.power(x3, 1.5, dtype=complex) + >>> np.power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -3452,9 +3452,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. - >>> np.float_power(x3, 1.5, dtype=complex) + >>> np.float_power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -3792,6 +3792,11 @@ def add_newdoc(place, name, doc): The sign of `x`. $OUT_SCALAR_1 + See Also + -------- + signbit + copysign + Notes ----- There is more than one definition of sign in common use for complex @@ -3828,6 +3833,11 @@ def add_newdoc(place, name, doc): Output array, or reference to `out` if that was supplied. $OUT_SCALAR_1 + See Also + -------- + sign + copysign + Examples -------- >>> import numpy as np @@ -3859,6 +3869,11 @@ def add_newdoc(place, name, doc): The values of `x1` with the sign of `x2`. $OUT_SCALAR_2 + See Also + -------- + sign + signbit + Examples -------- >>> import numpy as np @@ -4040,7 +4055,7 @@ def add_newdoc(place, name, doc): >>> # Discrepancy due to vagaries of floating point arithmetic. >>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.sinh([0.1], out1) >>> out2 is out1 True @@ -4241,7 +4256,7 @@ def add_newdoc(place, name, doc): >>> >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -4256,7 +4271,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'tanh', """ - Compute hyperbolic tangent element-wise. + Hyperbolic tangent, element-wise. Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``. 
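[Editorial note] The two formula fixes above swap ``\mathbf{b}`` for ``\mathbf{v}`` so the math names the operand the surrounding text actually calls ``v``. The conjugation asymmetry the formulas encode is easy to check numerically (assuming NumPy >= 2.2, where `matvec`/`vecmat` exist): `vecmat` conjugates its vector operand, `matvec` does not.

    >>> import numpy as np
    >>> A = np.array([[1.0, 2.0], [3.0, 4.0]])
    >>> v = np.array([1.0 + 1.0j, 2.0 + 0.0j])
    >>> np.allclose(np.vecmat(v, A), v.conj() @ A)  # vector is conjugated
    True
    >>> np.allclose(np.matvec(A, v), A @ v)         # plain matrix-vector product
    True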
@@ -4294,7 +4309,7 @@ def add_newdoc(place, name, doc): >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.tanh([0.1], out1) >>> out2 is out1 True diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index bde8921f5504..1a8750507f41 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -22,31 +22,19 @@ from numpy._core.multiarray import compare_chararrays from numpy._core.strings import ( _join as join, -) -from numpy._core.strings import ( _rsplit as rsplit, -) -from numpy._core.strings import ( _split as split, -) -from numpy._core.strings import ( _splitlines as splitlines, ) from numpy._utils import set_module from numpy.strings import * from numpy.strings import ( multiply as strings_multiply, -) -from numpy.strings import ( partition as strings_partition, -) -from numpy.strings import ( rpartition as strings_rpartition, ) -from .numeric import array as narray -from .numeric import asarray as asnarray -from .numeric import ndarray +from .numeric import array as narray, asarray as asnarray, ndarray from .numerictypes import bytes_, character, str_ __all__ = [ @@ -508,7 +496,6 @@ class adds the following functionality: title tofile tolist - tostring translate transpose upper @@ -731,7 +718,7 @@ def __mod__(self, i): def __rmod__(self, other): return NotImplemented - def argsort(self, axis=-1, kind=None, order=None): + def argsort(self, axis=-1, kind=None, order=None, *, stable=None): """ Return the indices that sort the array lexicographically. @@ -749,7 +736,7 @@ def argsort(self, axis=-1, kind=None, order=None): dtype='|S5') """ - return self.__array__().argsort(axis, kind, order) + return self.__array__().argsort(axis, kind, order, stable=stable) argsort.__doc__ = ndarray.argsort.__doc__ def capitalize(self): diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 776962c53998..bc587ed846ba 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,27 +1,23 @@ -from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload -from typing import Literal as L - +from collections.abc import Buffer +from typing import Any, Literal as L, Self, SupportsIndex, SupportsInt, overload from typing_extensions import TypeVar import numpy as np -from numpy import ( - _OrderKACF, - _SupportsBuffer, - bytes_, - dtype, - int_, - ndarray, - object_, - str_, -) +from numpy import _OrderKACF, bytes_, dtype, int_, ndarray, object_, str_ from numpy._core.multiarray import compare_chararrays -from numpy._typing import NDArray, _AnyShape, _Shape, _ShapeLike, _SupportsArray -from numpy._typing import _ArrayLikeAnyString_co as UST_co -from numpy._typing import _ArrayLikeBool_co as b_co -from numpy._typing import _ArrayLikeBytes_co as S_co -from numpy._typing import _ArrayLikeInt_co as i_co -from numpy._typing import _ArrayLikeStr_co as U_co -from numpy._typing import _ArrayLikeString_co as T_co +from numpy._typing import ( + NDArray, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, + _ArrayLikeBool_co as b_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _Shape, + _ShapeLike, + _SupportsArray, +) __all__ = [ "equal", @@ -80,45 +76,56 @@ __all__ = [ ] _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_CharacterT = 
TypeVar("_CharacterT", bound=np.character) _CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], default=dtype, covariant=True) -_CharArray: TypeAlias = chararray[_AnyShape, dtype[_CharacterT]] +type _CharArray[ScalarT: np.character] = chararray[_AnyShape, dtype[ScalarT]] -_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = _StringDTypeArray | NDArray[np.str_] -_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] +type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] +type _StringDTypeOrUnicodeArray = _StringDTypeArray | NDArray[np.str_] +type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload def __new__( subtype, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[False] = ..., - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., + itemsize: SupportsIndex | SupportsInt = 1, + unicode: L[False] = False, + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", ) -> _CharArray[bytes_]: ... @overload def __new__( subtype, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[True] = ..., - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., + itemsize: SupportsIndex | SupportsInt, + unicode: L[True], + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", + ) -> _CharArray[str_]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt = 1, + *, + unicode: L[True], + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", ) -> _CharArray[str_]: ... def __array_finalize__(self, obj: object) -> None: ... - def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... - def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... - def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __eq__( self: _CharArray[str_], other: U_co, @@ -129,7 +136,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __ne__( self: _CharArray[str_], other: U_co, @@ -140,7 +147,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __ge__( self: _CharArray[str_], other: U_co, @@ -151,7 +158,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __le__( self: _CharArray[str_], other: U_co, @@ -162,7 +169,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... 
- @overload + @overload # type: ignore[override] def __gt__( self: _CharArray[str_], other: U_co, @@ -173,7 +180,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __lt__( self: _CharArray[str_], other: U_co, @@ -184,7 +191,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __add__( self: _CharArray[str_], other: U_co, @@ -195,7 +202,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> _CharArray[bytes_]: ... - @overload + @overload # type: ignore[override] def __radd__( self: _CharArray[str_], other: U_co, @@ -210,90 +217,90 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): def center( self: _CharArray[str_], width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> _CharArray[str_]: ... @overload def center( self: _CharArray[bytes_], width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> _CharArray[bytes_]: ... @overload def count( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def count( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... def decode( self: _CharArray[bytes_], - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, ) -> _CharArray[str_]: ... def encode( self: _CharArray[str_], - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, ) -> _CharArray[bytes_]: ... @overload def endswith( self: _CharArray[str_], suffix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( self: _CharArray[bytes_], suffix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def expandtabs( self, - tabsize: i_co = ..., + tabsize: i_co = 8, ) -> Self: ... @overload def find( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def find( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload @@ -311,27 +318,27 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): def ljust( self: _CharArray[str_], width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> _CharArray[str_]: ... @overload def ljust( self: _CharArray[bytes_], width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> _CharArray[bytes_]: ... @overload def lstrip( self: _CharArray[str_], - chars: U_co | None = ..., + chars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def lstrip( self: _CharArray[bytes_], - chars: S_co | None = ..., + chars: S_co | None = None, ) -> _CharArray[bytes_]: ... 
- @overload + @overload # type: ignore[override] def partition( self: _CharArray[str_], sep: U_co, @@ -347,57 +354,57 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): self: _CharArray[str_], old: U_co, new: U_co, - count: i_co | None = ..., + count: i_co | None = None, ) -> _CharArray[str_]: ... @overload def replace( self: _CharArray[bytes_], old: S_co, new: S_co, - count: i_co | None = ..., + count: i_co | None = None, ) -> _CharArray[bytes_]: ... @overload def rfind( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rfind( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rjust( self: _CharArray[str_], width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> _CharArray[str_]: ... @overload def rjust( self: _CharArray[bytes_], width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> _CharArray[bytes_]: ... @overload @@ -414,79 +421,79 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload def rsplit( self: _CharArray[str_], - sep: U_co | None = ..., - maxsplit: i_co | None = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( self: _CharArray[bytes_], - sep: S_co | None = ..., - maxsplit: i_co | None = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rstrip( self: _CharArray[str_], - chars: U_co | None = ..., + chars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def rstrip( self: _CharArray[bytes_], - chars: S_co | None = ..., + chars: S_co | None = None, ) -> _CharArray[bytes_]: ... @overload def split( self: _CharArray[str_], - sep: U_co | None = ..., - maxsplit: i_co | None = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( self: _CharArray[bytes_], - sep: S_co | None = ..., - maxsplit: i_co | None = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... - def splitlines(self, keepends: b_co | None = ...) -> NDArray[object_]: ... + def splitlines(self, keepends: b_co | None = None) -> NDArray[object_]: ... @overload def startswith( self: _CharArray[str_], prefix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( self: _CharArray[bytes_], prefix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def strip( self: _CharArray[str_], - chars: U_co | None = ..., + chars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def strip( self: _CharArray[bytes_], - chars: S_co | None = ..., + chars: S_co | None = None, ) -> _CharArray[bytes_]: ... @overload def translate( self: _CharArray[str_], table: U_co, - deletechars: U_co | None = ..., + deletechars: U_co | None = None, ) -> _CharArray[str_]: ... 
@overload def translate( self: _CharArray[bytes_], table: S_co, - deletechars: S_co | None = ..., + deletechars: S_co | None = None, ) -> _CharArray[bytes_]: ... def zfill(self, width: i_co) -> Self: ... @@ -555,7 +562,7 @@ def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... @overload def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def add(x1: T_co, T_co) -> _StringDTypeOrUnicodeArray: ... +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @@ -585,33 +592,33 @@ def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +def center(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... @overload -def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +def center(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... @overload -def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... @overload -def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def center(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... def decode( a: S_co, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[str_]: ... def encode( a: U_co | T_co, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[bytes_]: ... @overload -def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ... +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[str_]: ... @overload -def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ... +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[bytes_]: ... @overload -def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... @overload -def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... @overload def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... @@ -623,13 +630,13 @@ def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _Str def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +def ljust(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... @overload -def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +def ljust(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... @overload -def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... @overload -def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def ljust(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... 
@overload def lower(a: U_co) -> NDArray[str_]: ... @@ -641,13 +648,13 @@ def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def lstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def lstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... @overload -def lstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... @@ -663,53 +670,53 @@ def replace( a: U_co, old: U_co, new: U_co, - count: i_co | None = ..., + count: i_co | None = -1, ) -> NDArray[str_]: ... @overload def replace( a: S_co, old: S_co, new: S_co, - count: i_co | None = ..., + count: i_co | None = -1, ) -> NDArray[bytes_]: ... @overload def replace( a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeArray: ... @overload def replace( a: T_co, old: T_co, new: T_co, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeOrUnicodeArray: ... @overload def rjust( a: U_co, width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> NDArray[str_]: ... @overload def rjust( a: S_co, width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> NDArray[bytes_]: ... @overload def rjust( a: _StringDTypeSupportsArray, width: i_co, - fillchar: _StringDTypeSupportsArray = ..., + fillchar: str | _StringDTypeSupportsArray = " ", ) -> _StringDTypeArray: ... @overload def rjust( a: T_co, width: i_co, - fillchar: T_co = ..., + fillchar: T_co = " ", ) -> _StringDTypeOrUnicodeArray: ... @overload @@ -724,72 +731,72 @@ def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def rsplit( a: U_co, - sep: U_co | None = ..., - maxsplit: i_co | None = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( a: S_co, - sep: S_co | None = ..., - maxsplit: i_co | None = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( a: _StringDTypeSupportsArray, - sep: _StringDTypeSupportsArray | None = ..., - maxsplit: i_co | None = ..., + sep: _StringDTypeSupportsArray | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( a: T_co, - sep: T_co | None = ..., - maxsplit: i_co | None = ..., + sep: T_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload -def rstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def rstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... 
@overload -def rstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def split( a: U_co, - sep: U_co | None = ..., - maxsplit: i_co | None = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( a: S_co, - sep: S_co | None = ..., - maxsplit: i_co | None = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( a: _StringDTypeSupportsArray, - sep: _StringDTypeSupportsArray | None = ..., - maxsplit: i_co | None = ..., + sep: _StringDTypeSupportsArray | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( a: T_co, - sep: T_co | None = ..., - maxsplit: i_co | None = ..., + sep: T_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... -def splitlines(a: UST_co, keepends: b_co | None = ...) -> NDArray[np.object_]: ... +def splitlines(a: UST_co, keepends: b_co | None = None) -> NDArray[np.object_]: ... @overload -def strip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +def strip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def strip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +def strip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... @overload -def strip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def swapcase(a: U_co) -> NDArray[str_]: ... @@ -813,25 +820,25 @@ def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... def translate( a: U_co, table: str, - deletechars: str | None = ..., + deletechars: str | None = None, ) -> NDArray[str_]: ... @overload def translate( a: S_co, table: str, - deletechars: str | None = ..., + deletechars: str | None = None, ) -> NDArray[bytes_]: ... @overload def translate( a: _StringDTypeSupportsArray, table: str, - deletechars: str | None = ..., + deletechars: str | None = None, ) -> _StringDTypeArray: ... @overload def translate( a: T_co, table: str, - deletechars: str | None = ..., + deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... @overload @@ -857,88 +864,88 @@ def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... def count( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def count( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def count( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def endswith( a: U_co, suffix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: S_co, suffix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... 
@overload def endswith( a: T_co, suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def find( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def find( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def find( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... def isalpha(a: UST_co) -> NDArray[np.bool]: ... @@ -955,131 +962,181 @@ def isupper(a: UST_co) -> NDArray[np.bool]: ... def rfind( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rfind( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rfind( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def startswith( a: U_co, prefix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: S_co, prefix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: T_co, - suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + prefix: T_co, + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def str_len(A: UST_co) -> NDArray[int_]: ... # Overload 1 and 2: str- or bytes-based array-likes -# overload 3: arbitrary object with unicode=False (-> bytes_) -# overload 4: arbitrary object with unicode=True (-> str_) +# overload 3 and 4: arbitrary object with unicode=False (-> bytes_) +# overload 5 and 6: arbitrary object with unicode=True (-> str_) +# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) @overload def array( obj: U_co, - itemsize: int | None = ..., - copy: bool = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + unicode: L[True] | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_]: ... 
@overload def array( obj: S_co, - itemsize: int | None = ..., - copy: bool = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + unicode: L[False] | None = None, + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def array( obj: object, - itemsize: int | None = ..., - copy: bool = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + *, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def array( obj: object, - itemsize: int | None = ..., - copy: bool = ..., - unicode: L[True] = ..., - order: _OrderKACF = ..., + itemsize: int | None, + copy: bool, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = None, + copy: bool = True, + *, + unicode: L[True], + order: _OrderKACF = None, ) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = None, + copy: bool = True, + unicode: bool | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_] | _CharArray[bytes_]: ... @overload def asarray( obj: U_co, - itemsize: int | None = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + unicode: L[True] | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload def asarray( obj: S_co, - itemsize: int | None = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + unicode: L[False] | None = None, + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, - itemsize: int | None = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + *, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, - itemsize: int | None = ..., - unicode: L[True] = ..., - order: _OrderKACF = ..., + itemsize: int | None, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = None, + *, + unicode: L[True], + order: _OrderKACF = None, ) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = None, + unicode: bool | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_] | _CharArray[bytes_]: ... diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 8e71e6d4b1eb..9461994f5795 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -2,12 +2,14 @@ Implementation of optimized einsum. 
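+
+Two-term contractions are dispatched to batched matrix multiplication
+(``matmul``), or to elementwise ``multiply`` when no indices are contracted,
+with ``c_einsum`` as the general fallback (see ``bmm_einsum``).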
""" +import functools import itertools import operator -from numpy._core.multiarray import c_einsum -from numpy._core.numeric import asanyarray, tensordot +from numpy._core.multiarray import c_einsum, matmul +from numpy._core.numeric import asanyarray, reshape from numpy._core.overrides import array_function_dispatch +from numpy._core.umath import multiply __all__ = ['einsum', 'einsum_path'] @@ -440,116 +442,6 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): return path -def _can_dot(inputs, result, idx_removed): - """ - Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. - - Parameters - ---------- - inputs : list of str - Specifies the subscripts for summation. - result : str - Resulting summation. - idx_removed : set - Indices that are removed in the summation - - - Returns - ------- - type : bool - Returns true if BLAS should and can be used, else False - - Notes - ----- - If the operations is BLAS level 1 or 2 and is not already aligned - we default back to einsum as the memory movement to copy is more - costly than the operation itself. - - - Examples - -------- - - # Standard GEMM operation - >>> _can_dot(['ij', 'jk'], 'ik', set('j')) - True - - # Can use the standard BLAS, but requires odd data movement - >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) - False - - # DDOT where the memory is not aligned - >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) - False - - """ - - # All `dot` calls remove indices - if len(idx_removed) == 0: - return False - - # BLAS can only handle two operands - if len(inputs) != 2: - return False - - input_left, input_right = inputs - - for c in set(input_left + input_right): - # can't deal with repeated indices on same input or more than 2 total - nl, nr = input_left.count(c), input_right.count(c) - if (nl > 1) or (nr > 1) or (nl + nr > 2): - return False - - # can't do implicit summation or dimension collapse e.g. - # "ab,bc->c" (implicitly sum over 'a') - # "ab,ca->ca" (take diagonal of 'a') - if nl + nr - 1 == int(c in result): - return False - - # Build a few temporaries - set_left = set(input_left) - set_right = set(input_right) - keep_left = set_left - idx_removed - keep_right = set_right - idx_removed - rs = len(idx_removed) - - # At this point we are a DOT, GEMV, or GEMM operation - - # Handle inner products - - # DDOT with aligned data - if input_left == input_right: - return True - - # DDOT without aligned data (better to use einsum) - if set_left == set_right: - return False - - # Handle the 4 possible (aligned) GEMV or GEMM cases - - # GEMM or GEMV no transpose - if input_left[-rs:] == input_right[:rs]: - return True - - # GEMM or GEMV transpose both - if input_left[:rs] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose right - if input_left[-rs:] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose left - if input_left[:rs] == input_right[:rs]: - return True - - # Einsum is faster than GEMV if we have to copy data - if not keep_left or not keep_right: - return False - - # We are a matrix-matrix product, but we need to copy data - return True - - def _parse_einsum_input(operands): """ A reproduction of einsum c side einsum parsing in python. 
@@ -887,13 +779,14 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): # Build a few useful list and sets input_list = input_subscripts.split(',') + num_inputs = len(input_list) input_sets = [set(x) for x in input_list] output_set = set(output_subscript) indices = set(input_subscripts.replace(',', '')) + num_indices = len(indices) # Get length of each unique dimension and ensure all dimensions are correct dimension_dict = {} - broadcast_indices = [[] for x in range(len(input_list))] for tnum, term in enumerate(input_list): sh = operands[tnum].shape if len(sh) != len(term): @@ -903,10 +796,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): for cnum, char in enumerate(term): dim = sh[cnum] - # Build out broadcast indices - if dim == 1: - broadcast_indices[tnum].append(char) - if char in dimension_dict.keys(): # For broadcasting cases we always want the largest dim size if dimension_dict[char] == 1: @@ -918,9 +807,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): else: dimension_dict[char] = dim - # Convert broadcast inds to sets - broadcast_indices = [set(x) for x in broadcast_indices] - # Compute size of each input array plus the output array size_list = [_compute_size_by_dict(term, dimension_dict) for term in input_list + [output_subscript]] @@ -931,23 +817,16 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): else: memory_arg = memory_limit - # Compute naive cost - # This isn't quite right, need to look into exactly how einsum does this - inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0 - naive_cost = _flop_count( - indices, inner_product, len(input_list), dimension_dict - ) - # Compute the path if explicit_einsum_path: path = path_type[1:] elif ( (path_type is False) - or (len(input_list) in [1, 2]) + or (num_inputs in [1, 2]) or (indices == output_set) ): # Nothing to be optimized, leave it to einsum - path = [tuple(range(len(input_list)))] + path = [tuple(range(num_inputs))] elif path_type == "greedy": path = _greedy_path( input_sets, output_set, dimension_dict, memory_arg @@ -969,26 +848,18 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): contract = _find_contraction(contract_inds, input_sets, output_set) out_inds, input_sets, idx_removed, idx_contract = contract - cost = _flop_count( - idx_contract, idx_removed, len(contract_inds), dimension_dict - ) - cost_list.append(cost) - scale_list.append(len(idx_contract)) - size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) + if not einsum_call_arg: + # these are only needed for printing info + cost = _flop_count( + idx_contract, idx_removed, len(contract_inds), dimension_dict + ) + cost_list.append(cost) + scale_list.append(len(idx_contract)) + size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) - bcast = set() tmp_inputs = [] for x in contract_inds: tmp_inputs.append(input_list.pop(x)) - bcast |= broadcast_indices.pop(x) - - new_bcast_inds = bcast - idx_removed - - # If we're broadcasting, nix blas - if not len(idx_removed & bcast): - do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) - else: - do_blas = False # Last contraction if (cnum - len(path)) == -1: @@ -998,16 +869,11 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): idx_result = "".join([x[1] for x in sorted(sort_result)]) input_list.append(idx_result) - broadcast_indices.append(new_bcast_inds) einsum_str = ",".join(tmp_inputs) + "->" + idx_result - contraction = ( - contract_inds, idx_removed, einsum_str, 
input_list[:], do_blas
-        )
+        contraction = (contract_inds, einsum_str, input_list[:])
         contraction_list.append(contraction)
 
-    opt_cost = sum(cost_list) + 1
-
     if len(input_list) != 1:
         # Explicit "einsum_path" is usually trusted, but we detect this kind of
         # mistake in order to prevent from returning an intermediate value.
@@ -1022,11 +888,21 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
     overall_contraction = input_subscripts + "->" + output_subscript
     header = ("scaling", "current", "remaining")
 
+    # Compute naive cost
+    # This isn't quite right, need to look into exactly how einsum does this
+    inner_product = (
+        sum(len(set(x)) for x in input_subscripts.split(',')) - num_indices
+    ) > 0
+    naive_cost = _flop_count(
+        indices, inner_product, num_inputs, dimension_dict
+    )
+
+    opt_cost = sum(cost_list) + 1
     speedup = naive_cost / opt_cost
     max_i = max(size_list)
 
     path_print = f"  Complete contraction:  {overall_contraction}\n"
-    path_print += f"         Naive scaling:  {len(indices)}\n"
+    path_print += f"         Naive scaling:  {num_indices}\n"
     path_print += "     Optimized scaling:  %d\n" % max(scale_list)
     path_print += f"      Naive FLOP count:  {naive_cost:.3e}\n"
     path_print += f"  Optimized FLOP count:  {opt_cost:.3e}\n"
@@ -1037,7 +913,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
     path_print += "-" * 74
 
     for n, contraction in enumerate(contraction_list):
-        inds, idx_rm, einsum_str, remaining, blas = contraction
+        _, einsum_str, remaining = contraction
         remaining_str = ",".join(remaining) + "->" + output_subscript
         path_run = (scale_list[n], einsum_str, remaining_str)
         path_print += "\n%4d    %24s %40s" % path_run
@@ -1046,6 +922,317 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
 
     return (path, path_print)
 
 
+def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out):
+    """If there are no contracted indices, then we can directly transpose and
+    insert singleton dimensions into ``a`` and ``b`` such that (broadcast)
+    elementwise multiplication performs the einsum.
+
+    No need to cache this as it is within the cached
+    ``_parse_eq_to_batch_matmul``.
+
+    """
+    desired_a = ""
+    desired_b = ""
+    new_shape_a = []
+    new_shape_b = []
+    for ix in out:
+        if ix in a_term:
+            desired_a += ix
+            new_shape_a.append(shape_a[a_term.index(ix)])
+        else:
+            new_shape_a.append(1)
+        if ix in b_term:
+            desired_b += ix
+            new_shape_b.append(shape_b[b_term.index(ix)])
+        else:
+            new_shape_b.append(1)
+
+    if desired_a != a_term:
+        eq_a = f"{a_term}->{desired_a}"
+    else:
+        eq_a = None
+    if desired_b != b_term:
+        eq_b = f"{b_term}->{desired_b}"
+    else:
+        eq_b = None
+
+    return (
+        eq_a,
+        eq_b,
+        new_shape_a,
+        new_shape_b,
+        None,  # new_shape_ab, not needed since not fusing
+        None,  # perm_ab, not needed as we transpose a and b first
+        True,  # pure_multiplication=True
+    )
+
+
+@functools.lru_cache(2**12)
+def _parse_eq_to_batch_matmul(eq, shape_a, shape_b):
+    """Cached parsing of a two-term einsum equation into the necessary
+    sequence of arguments for contraction via batched matrix multiplication.
+    The steps we need to specify are:
+
+    1. Remove repeated and trivial indices from the left and right terms,
+       and transpose them, done as a single einsum.
+    2. Fuse the remaining indices so we have two 3D tensors.
+    3. Perform the batched matrix multiplication.
+    4. Unfuse the output to get the desired final index order.
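+
+    For example (a minimal sketch; the tuple layout matches the ``return``
+    statements below), a plain matrix product needs no preparation at all,
+    so every prepared argument is ``None`` and the trailing flag reports
+    that this is not a pure multiplication:
+
+    >>> _parse_eq_to_batch_matmul("ij,jk->ik", (2, 3), (3, 4))
+    (None, None, None, None, None, None, False)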
+
+    """
+    lhs, out = eq.split("->")
+    a_term, b_term = lhs.split(",")
+
+    if len(a_term) != len(shape_a):
+        raise ValueError(f"Term '{a_term}' does not match shape {shape_a}.")
+    if len(b_term) != len(shape_b):
+        raise ValueError(f"Term '{b_term}' does not match shape {shape_b}.")
+
+    sizes = {}
+    singletons = set()
+
+    # parse left term to unique indices with size > 1
+    left = {}
+    for ix, d in zip(a_term, shape_a):
+        if d == 1:
+            # everything (including broadcasting) works nicely if we simply
+            # ignore such dimensions, but we do need to track if they appear
+            # in the output and thus should be reintroduced later
+            singletons.add(ix)
+            continue
+        if sizes.setdefault(ix, d) != d:
+            # set and check size
+            raise ValueError(
+                f"Index {ix} has mismatched sizes {sizes[ix]} and {d}."
+            )
+        left[ix] = True
+
+    # parse right term to unique indices with size > 1
+    right = {}
+    for ix, d in zip(b_term, shape_b):
+        # broadcast indices (size 1 on one input and size != 1
+        # on the other) should not be treated as singletons
+        if d == 1:
+            if ix not in left:
+                singletons.add(ix)
+            continue
+        singletons.discard(ix)
+
+        if sizes.setdefault(ix, d) != d:
+            # set and check size
+            raise ValueError(
+                f"Index {ix} has mismatched sizes {sizes[ix]} and {d}."
+            )
+        right[ix] = True
+
+    # now we classify the unique size > 1 indices only
+    bat_inds = []  # appears on A, B, O
+    con_inds = []  # appears on A, B, .
+    a_keep = []  # appears on A, ., O
+    b_keep = []  # appears on ., B, O
+    # other indices (appearing on A or B only) will
+    # be summed or traced out prior to the matmul
+    for ix in left:
+        if right.pop(ix, False):
+            if ix in out:
+                bat_inds.append(ix)
+            else:
+                con_inds.append(ix)
+        elif ix in out:
+            a_keep.append(ix)
+    # now only indices unique to right remain
+    for ix in right:
+        if ix in out:
+            b_keep.append(ix)
+
+    if not con_inds:
+        # contraction is pure multiplication, prepare inputs differently
+        return _parse_eq_to_pure_multiplication(
+            a_term, shape_a, b_term, shape_b, out
+        )
+
+    # only need the size one indices that appear in the output
+    singletons = [ix for ix in out if ix in singletons]
+
+    # take diagonal, remove any trivial axes and transpose left
+    desired_a = "".join((*bat_inds, *a_keep, *con_inds))
+    if a_term != desired_a:
+        eq_a = f"{a_term}->{desired_a}"
+    else:
+        eq_a = None
+
+    # take diagonal, remove any trivial axes and transpose right
+    desired_b = "".join((*bat_inds, *con_inds, *b_keep))
+    if b_term != desired_b:
+        eq_b = f"{b_term}->{desired_b}"
+    else:
+        eq_b = None
+
+    # then we want to reshape
+    if bat_inds:
+        lgroups = (bat_inds, a_keep, con_inds)
+        rgroups = (bat_inds, con_inds, b_keep)
+        ogroups = (bat_inds, a_keep, b_keep)
+    else:
+        # avoid size 1 batch dimension if no batch indices
+        lgroups = (a_keep, con_inds)
+        rgroups = (con_inds, b_keep)
+        ogroups = (a_keep, b_keep)
+
+    if any(len(group) != 1 for group in lgroups):
+        # need to fuse 'kept' and contracted indices
+        # (though could allow batch indices to be broadcast)
+        new_shape_a = tuple(
+            functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1)
+            for ix_group in lgroups
+        )
+    else:
+        new_shape_a = None
+
+    if any(len(group) != 1 for group in rgroups):
+        # need to fuse 'kept' and contracted indices
+        # (though could allow batch indices to be broadcast)
+        new_shape_b = tuple(
+            functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1)
+            for ix_group in rgroups
+        )
+    else:
+        new_shape_b = None
+
+    if any(len(group) != 1 for group in ogroups) or singletons:
+        new_shape_ab = (1,) * len(singletons) + tuple(
+            sizes[ix] for ix_group in ogroups for ix in ix_group
+        )
+    else:
+        new_shape_ab = None
+
+    # then we might need to permute the matmul produced output:
+    out_produced = "".join((*singletons, *bat_inds, *a_keep, *b_keep))
+    if out_produced != out:
+        perm_ab = tuple(out_produced.index(ix) for ix in out)
+    else:
+        perm_ab = None
+
+    return (
+        eq_a,
+        eq_b,
+        new_shape_a,
+        new_shape_b,
+        new_shape_ab,
+        perm_ab,
+        False,  # pure_multiplication=False
+    )
+
+
+@functools.lru_cache(maxsize=64)
+def _parse_output_order(order, a_is_fcontig, b_is_fcontig):
+    order = order.upper()
+    if order == "K":
+        return None
+    elif order in "CF":
+        return order
+    elif order == "A":
+        if a_is_fcontig and b_is_fcontig:
+            return "F"
+        else:
+            return "C"
+    else:
+        raise ValueError(
+            "order must be one of "
+            f"'C', 'F', 'A', or 'K' (got '{order}')"
+        )
+
+
+def bmm_einsum(eq, a, b, out=None, **kwargs):
+    """Perform arbitrary pairwise einsums using only ``matmul``, or
+    ``multiply`` if no contracted indices are involved (plus, where needed, a
+    single-term ``einsum`` to prepare each operand individually). The logic
+    for each step is cached based on the equation and array shapes, and each
+    step is only performed if necessary.
+
+    Parameters
+    ----------
+    eq : str
+        The einsum equation.
+    a : array_like
+        The first array to contract.
+    b : array_like
+        The second array to contract.
+    out : ndarray, optional
+        If given, the array in which to place the result.
+
+    Returns
+    -------
+    array_like
+
+    Notes
+    -----
+    A fuller description of this algorithm, and original source for this
+    implementation, can be found at https://github.com/jcmgray/einsum_bmm.
+    """
+    (
+        eq_a,
+        eq_b,
+        new_shape_a,
+        new_shape_b,
+        new_shape_ab,
+        perm_ab,
+        pure_multiplication,
+    ) = _parse_eq_to_batch_matmul(eq, a.shape, b.shape)
+
+    # n.b. one could special-case certain equations here to call c_einsum
+    # directly
+
+    # need to handle `order` a little manually, since we do transpose
+    # operations before and potentially after the ufunc calls
+    output_order = _parse_output_order(
+        kwargs.pop("order", "K"), a.flags.f_contiguous, b.flags.f_contiguous
+    )
+
+    # prepare left
+    if eq_a is not None:
+        # diagonals, sums, and transpose
+        a = c_einsum(eq_a, a)
+    if new_shape_a is not None:
+        a = reshape(a, new_shape_a)
+
+    # prepare right
+    if eq_b is not None:
+        # diagonals, sums, and transpose
+        b = c_einsum(eq_b, b)
+    if new_shape_b is not None:
+        b = reshape(b, new_shape_b)
+
+    if pure_multiplication:
+        # no contracted indices
+        if output_order is not None:
+            kwargs["order"] = output_order
+
+        # do the 'contraction' via multiplication!
+        return multiply(a, b, out=out, **kwargs)
+
+    # can only supply out here if no other reshaping / transposing
+    matmul_out_compatible = (new_shape_ab is None) and (perm_ab is None)
+    if matmul_out_compatible:
+        kwargs["out"] = out
+
+    # do the contraction!
+    ab = matmul(a, b, **kwargs)
+
+    # prepare the output
+    if new_shape_ab is not None:
+        ab = reshape(ab, new_shape_ab)
+    if perm_ab is not None:
+        ab = ab.transpose(perm_ab)
+
+    if (out is not None) and (not matmul_out_compatible):
+        # handle case where out is specified, but we also needed
+        # to reshape / transpose ``ab`` after the matmul
+        out[:] = ab
+        ab = out
+    elif output_order is not None:
+        ab = asanyarray(ab, order=output_order)
+
+    return ab
+
+
 def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
     # Arguably we dispatch on more arguments than we really should; see note in
     # _einsum_path_dispatcher for why.
@@ -1434,58 +1621,23 @@ def einsum(*operands, out=None, optimize=False, **kwargs): operands, contraction_list = einsum_path(*operands, optimize=optimize, einsum_call=True) - # Handle order kwarg for output array, c_einsum allows mixed case - output_order = kwargs.pop('order', 'K') - if output_order.upper() == 'A': - if all(arr.flags.f_contiguous for arr in operands): - output_order = 'F' - else: - output_order = 'C' - # Start contraction loop for num, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction + inds, einsum_str, _ = contraction tmp_operands = [operands.pop(x) for x in inds] # Do we need to deal with the output? handle_out = specified_out and ((num + 1) == len(contraction_list)) - # Call tensordot if still possible - if blas: - # Checks have already been handled - input_str, results_index = einsum_str.split('->') - input_left, input_right = input_str.split(',') - - tensor_result = input_left + input_right - for s in idx_rm: - tensor_result = tensor_result.replace(s, "") - - # Find indices to contract over - left_pos, right_pos = [], [] - for s in sorted(idx_rm): - left_pos.append(input_left.find(s)) - right_pos.append(input_right.find(s)) - - # Contract! - new_view = tensordot( - *tmp_operands, axes=(tuple(left_pos), tuple(right_pos)) - ) - - # Build a new view if needed - if (tensor_result != results_index) or handle_out: - if handle_out: - kwargs["out"] = out - new_view = c_einsum( - tensor_result + '->' + results_index, new_view, **kwargs - ) + # If out was specified + if handle_out: + kwargs["out"] = out - # Call einsum + if len(tmp_operands) == 2: + # Call (batched) matrix multiplication if possible + new_view = bmm_einsum(einsum_str, *tmp_operands, **kwargs) else: - # If out was specified - if handle_out: - kwargs["out"] = out - - # Do the contraction + # Call einsum new_view = c_einsum(einsum_str, *tmp_operands, **kwargs) # Append new items and dereference what we can @@ -1495,4 +1647,4 @@ def einsum(*operands, out=None, optimize=False, **kwargs): if specified_out: return out else: - return asanyarray(operands[0], order=output_order) + return operands[0] diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index 9653a26dcd78..3e42ef6dc238 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -1,8 +1,8 @@ from collections.abc import Sequence -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload import numpy as np -from numpy import _OrderKACF, number +from numpy import _OrderKACF from numpy._typing import ( NDArray, _ArrayLikeBool_co, @@ -22,14 +22,9 @@ from numpy._typing import ( __all__ = ["einsum", "einsum_path"] -_ArrayT = TypeVar( - "_ArrayT", - bound=NDArray[np.bool | number], -) - -_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None -_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"] -_CastingUnsafe: TypeAlias = Literal["unsafe"] +type _OptimizeKind = bool | Literal["greedy", "optimal"] | Sequence[Any] | None +type _CastingSafe = Literal["no", "equiv", "safe", "same_kind"] +type _CastingUnsafe = Literal["unsafe"] # TODO: Properly handle the `casting`-based combinatorics # TODO: We need to evaluate the content `__subscripts` in order @@ -42,55 +37,55 @@ def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeBool_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeBool | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: 
_OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeUInt_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeUInt | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeInt_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeInt | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeFloat_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeFloat | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeComplex | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( @@ -99,43 +94,43 @@ def einsum( *operands: Any, casting: _CastingUnsafe, dtype: _DTypeLikeComplex_co | None = ..., - out: None = ..., + out: None = None, order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: _ArrayT, + out: OutT, dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayT: ... + optimize: _OptimizeKind = False, +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayT, + out: OutT, casting: _CastingUnsafe, dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayT: ... + optimize: _OptimizeKind = False, +) -> OutT: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( @@ -144,32 +139,32 @@ def einsum( *operands: Any, casting: _CastingUnsafe, dtype: _DTypeLikeObject | None = ..., - out: None = ..., + out: None = None, order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: _ArrayT, + out: OutT, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayT: ... + optimize: _OptimizeKind = False, +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayT, + out: OutT, casting: _CastingUnsafe, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayT: ... 
+ optimize: _OptimizeKind = False, +) -> OutT: ... # NOTE: `einsum_call` is a hidden kwarg unavailable for public use. # It is therefore excluded from the signatures below. diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 73dcd1ddc11d..dd94b4d0bed9 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -2,16 +2,13 @@ """ import functools +import math import types -import warnings import numpy as np from numpy._utils import set_module -from . import _methods, overrides -from . import multiarray as mu -from . import numerictypes as nt -from . import umath as um +from . import _methods, multiarray as mu, numerictypes as nt, overrides, umath as um from ._multiarray_umath import _array_converter from .multiarray import asanyarray, asarray, concatenate @@ -172,7 +169,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): Ni, Nk = a.shape[:axis], a.shape[axis+1:] for ii in ndindex(Ni): - for kk in ndindex(Nj): + for kk in ndindex(Nk): out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] For this reason, it is equivalent to (but faster than) the following use @@ -203,13 +200,12 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None, - copy=None): +def _reshape_dispatcher(a, /, shape, order=None, *, copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): +def reshape(a, /, shape, order='C', *, copy=None): """ Gives a new shape to an array without changing its data. @@ -235,10 +231,6 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): 'A' means to read / write the elements in Fortran-like index order if ``a`` is Fortran *contiguous* in memory, C-like order otherwise. - newshape : int or tuple of ints - .. deprecated:: 2.1 - Replaced by ``shape`` argument. Retained for backward - compatibility. copy : bool, optional If ``True``, then the array data is copied. If ``None``, a copy will only be made if it's required by ``order``. For ``False`` it raises @@ -302,23 +294,6 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): [3, 4], [5, 6]]) """ - if newshape is None and shape is None: - raise TypeError( - "reshape() missing 1 required positional argument: 'shape'") - if newshape is not None: - if shape is not None: - raise TypeError( - "You cannot specify 'newshape' and 'shape' arguments " - "at the same time.") - # Deprecated in NumPy 2.1, 2024-04-18 - warnings.warn( - "`newshape` keyword argument is deprecated, " - "use `shape=...` or pass shape positionally instead. " - "(deprecated in NumPy 2.1)", - DeprecationWarning, - stacklevel=2, - ) - shape = newshape if copy is not None: return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) return _wrapfunc(a, 'reshape', shape, order=order) @@ -779,8 +754,6 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): provided with a sequence of k-th it will partition all elements indexed by k-th of them into their sorted position at once. - .. deprecated:: 1.22.0 - Passing booleans as index is deprecated. axis : int or None, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. 
@@ -892,8 +865,6 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
         sequence of k-th it will partition all of them into their sorted
         position at once.
 
-        .. deprecated:: 1.22.0
-            Passing booleans as index is deprecated.
     axis : int or None, optional
         Axis along which to sort. The default is -1 (the last axis). If
         None, the flattened array is used.
@@ -1306,6 +1277,8 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
 
     Indexes of the maximal elements of a N-dimensional array:
 
+    >>> a.flat[np.argmax(a)]
+    15
     >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
     >>> ind
     (1, 2)
@@ -1404,6 +1377,8 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue):
 
     Indices of the minimum elements of a N-dimensional array:
 
+    >>> a.flat[np.argmin(a)]
+    10
     >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
     >>> ind
     (0, 0)
@@ -1607,7 +1582,8 @@ def resize(a, new_shape):
         # First case must zero fill. The second would have repeats == 0.
         return np.zeros_like(a, shape=new_shape)
 
-    repeats = -(-new_size // a.size)  # ceil division
+    # ceiling division without negating new_size
+    repeats = (new_size + a.size - 1) // a.size
     a = concatenate((a,) * repeats)[:new_size]
 
     return reshape(a, new_shape)
@@ -2027,15 +2003,6 @@ def nonzero(a):
     To group the indices by element, rather than dimension, use `argwhere`,
     which returns a row for each non-zero element.
 
-    .. note::
-
-       When called on a zero-d array or scalar, ``nonzero(a)`` is treated
-       as ``nonzero(atleast_1d(a))``.
-
-       .. deprecated:: 1.17.0
-
-          Use `atleast_1d` explicitly if this behavior is deliberate.
-
     Parameters
     ----------
     a : array_like
@@ -2059,7 +2026,7 @@ def nonzero(a):
     Notes
    -----
     While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
-    recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
+    recommended to use ``x[x.astype(np.bool)]`` or ``x[x != 0]`` instead, which
     will correctly handle 0-d arrays.
 
     Examples
@@ -2418,7 +2385,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
     more precise approach to summation.
     Especially when summing a large number of lower precision floating point
     numbers, such as ``float32``, numerical errors can become significant.
-    In such cases it can be advisable to use `dtype="float64"` to use a higher
+    In such cases it can be advisable to use `dtype=np.float64` to get a higher
     precision for the output.
 
     Examples
@@ -2449,19 +2416,12 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
     """
     if isinstance(a, _gentype):
         # 2018-02-25, 1.15.0
-        warnings.warn(
-            "Calling np.sum(generator) is deprecated, and in the future will "
-            "give a different result. Use np.sum(np.fromiter(generator)) or "
+        raise TypeError(
+            "Calling np.sum(generator) is not supported. "
+            "Use np.sum(np.fromiter(generator)) or "
             "the python sum builtin instead.",
-            DeprecationWarning, stacklevel=2
         )
-        res = _sum_(a)
-        if out is not None:
-            out[...] = res
-            return out
-        return res
-
     return _wrapreduction(
         a, np.add, 'sum', axis, dtype, out,
         keepdims=keepdims, initial=initial, where=where
@@ -2761,7 +2721,7 @@ def cumulative_prod(x, /, *, axis=None, dtype=None, out=None,
     ...
# total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([1, 2, 3, 4, 5, 6]) - >>> np.cumulative_prod(a, dtype=float) # specify type of output + >>> np.cumulative_prod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of ``b``: @@ -2848,7 +2808,7 @@ def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, array([1, 2, 3, 4, 5, 6]) >>> np.cumulative_sum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s) + >>> np.cumulative_sum(a, dtype=np.float64) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> b = np.array([[1, 2, 3], [4, 5, 6]]) @@ -2932,7 +2892,7 @@ def cumsum(a, axis=None, dtype=None, out=None): [4, 5, 6]]) >>> np.cumsum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + >>> np.cumsum(a, dtype=np.float64) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns @@ -3136,7 +3096,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, array([1, 3]) >>> np.max(a, where=[False, True], initial=-1, axis=0) array([-1, 3]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.max(b) np.float64(nan) @@ -3275,7 +3235,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, >>> np.min(a, where=[False, True], initial=10, axis=0) array([10, 1]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.min(b) np.float64(nan) @@ -3496,7 +3456,7 @@ def cumprod(a, axis=None, dtype=None, out=None): ... # total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.cumprod(a, dtype=float) # specify type of output + >>> np.cumprod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of `a`: @@ -3571,10 +3531,13 @@ def size(a, axis=None): ---------- a : array_like Input data. - axis : int, optional - Axis along which the elements are counted. By default, give + axis : None or int or tuple of ints, optional + Axis or axes along which the elements are counted. By default, give the total number of elements. + .. versionchanged:: 2.4 + Extended to accept multiple axes. 
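+
+        When a tuple of axes is given, the result is the product of the
+        sizes of the given axes.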
+ Returns ------- element_count : int @@ -3592,10 +3555,12 @@ def size(a, axis=None): >>> a = np.array([[1,2,3],[4,5,6]]) >>> np.size(a) 6 - >>> np.size(a,1) + >>> np.size(a,axis=1) 3 - >>> np.size(a,0) + >>> np.size(a,axis=0) 2 + >>> np.size(a,axis=(0,1)) + 6 """ if axis is None: @@ -3604,10 +3569,10 @@ def size(a, axis=None): except AttributeError: return asarray(a).size else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] + _shape = shape(a) + from .numeric import normalize_axis_tuple + axis = normalize_axis_tuple(axis, len(_shape), allow_duplicate=False) + return math.prod(_shape[ax] for ax in axis) def _round_dispatcher(a, decimals=None, out=None): diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 050eb9f75c40..ce08335ac256 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,4 +1,5 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Sequence from typing import ( Any, @@ -6,18 +7,14 @@ from typing import ( Never, Protocol, SupportsIndex, - TypeAlias, - TypeVar, + TypedDict, + Unpack, overload, type_check_only, ) -from _typeshed import Incomplete -from typing_extensions import deprecated - import numpy as np from numpy import ( - _AnyShapeT, _CastingKind, _ModeKind, _OrderACF, @@ -28,12 +25,10 @@ from numpy import ( complexfloating, float16, floating, - generic, int64, int_, intp, object_, - timedelta64, uint64, ) from numpy._globals import _NoValueType @@ -57,6 +52,7 @@ from numpy._typing import ( _NestedSequence, _NumberLike_co, _ScalarLike_co, + _Shape, _ShapeLike, ) @@ -107,102 +103,113 @@ __all__ = [ "var", ] -_ScalarT = TypeVar("_ScalarT", bound=generic) -_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) - @type_check_only -class _SupportsShape(Protocol[_ShapeT_co]): +class _SupportsShape[ShapeT_co: _Shape](Protocol): # NOTE: it matters that `self` is positional only @property - def shape(self, /) -> _ShapeT_co: ... + def shape(self, /) -> ShapeT_co: ... + +@type_check_only +class _UFuncKwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + order: _OrderKACF + subok: bool + signature: str | tuple[str | None, ...] + casting: _CastingKind # a "sequence" that isn't a string, bytes, bytearray, or memoryview -_T = TypeVar("_T") -_PyArray: TypeAlias = list[_T] | tuple[_T, ...] +type _PyArray[_T] = list[_T] | tuple[_T, ...] # `int` also covers `bool` -_PyScalar: TypeAlias = complex | bytes | str +type _PyScalar = complex | bytes | str +type _0D = tuple[()] +type _1D = tuple[int] +type _2D = tuple[int, int] +type _3D = tuple[int, int, int] +type _4D = tuple[int, int, int, int] + +type _Array1D[ScalarT: np.generic] = np.ndarray[_1D, np.dtype[ScalarT]] + +### + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _IntLike_co, - axis: None = ..., - out: None = ..., - mode: _ModeKind = ..., -) -> _ScalarT: ... + axis: None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> ScalarT: ... 
@overload def take( a: ArrayLike, indices: _IntLike_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> Any: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., -) -> NDArray[_ScalarT]: ... + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[ScalarT]: ... @overload def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, - mode: _ModeKind = ..., -) -> _ArrayT: ... + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - out: _ArrayT, - mode: _ModeKind = ..., -) -> _ArrayT: ... + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... -@overload -def reshape( # shape: index - a: _ArrayLike[_ScalarT], +@overload # shape: index +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, shape: SupportsIndex, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... -@overload -def reshape( # shape: (int, ...) @ _AnyShapeT - a: _ArrayLike[_ScalarT], +) -> _Array1D[ScalarT]: ... +@overload # shape: ~ShapeT +def reshape[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], /, - shape: _AnyShapeT, + shape: ShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... @overload # shape: Sequence[index] -def reshape( - a: _ArrayLike[_ScalarT], +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, shape: Sequence[SupportsIndex], order: _OrderACF = "C", *, copy: bool | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload # shape: index def reshape( a: ArrayLike, @@ -211,16 +218,16 @@ def reshape( order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype]: ... -@overload -def reshape( # shape: (int, ...) @ _AnyShapeT +) -> np.ndarray[_1D]: ... +@overload # shape: ~ShapeT +def reshape[ShapeT: _Shape]( a: ArrayLike, /, - shape: _AnyShapeT, + shape: ShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeT, np.dtype]: ... +) -> np.ndarray[ShapeT]: ... @overload # shape: Sequence[index] def reshape( a: ArrayLike, @@ -230,69 +237,55 @@ def reshape( *, copy: bool | None = None, ) -> NDArray[Any]: ... -@overload -@deprecated( - "`newshape` keyword argument is deprecated, " - "use `shape=...` or pass shape positionally instead. " - "(deprecated in NumPy 2.1)", -) -def reshape( - a: ArrayLike, - /, - shape: None = None, - order: _OrderACF = "C", - *, - newshape: _ShapeLike, - copy: bool | None = None, -) -> NDArray[Any]: ... @overload def choose( a: _IntLike_co, choices: ArrayLike, - out: None = ..., - mode: _ModeKind = ..., + out: None = None, + mode: _ModeKind = "raise", ) -> Any: ... 
@overload -def choose( +def choose[ScalarT: np.generic]( a: _ArrayLikeInt_co, - choices: _ArrayLike[_ScalarT], - out: None = ..., - mode: _ModeKind = ..., -) -> NDArray[_ScalarT]: ... + choices: _ArrayLike[ScalarT], + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[ScalarT]: ... @overload def choose( a: _ArrayLikeInt_co, choices: ArrayLike, - out: None = ..., - mode: _ModeKind = ..., + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def choose( +def choose[ArrayT: np.ndarray]( a: _ArrayLikeInt_co, choices: ArrayLike, - out: _ArrayT, - mode: _ModeKind = ..., -) -> _ArrayT: ... + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... +# keep in sync with `ma.core.repeat` @overload -def repeat( - a: _ArrayLike[_ScalarT], +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +) -> _Array1D[ScalarT]: ... @overload -def repeat( - a: _ArrayLike[_ScalarT], +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +) -> _Array1D[Any]: ... @overload def repeat( a: ArrayLike, @@ -300,51 +293,47 @@ def repeat( axis: SupportsIndex, ) -> NDArray[Any]: ... +# def put( a: NDArray[Any], ind: _ArrayLikeInt_co, v: ArrayLike, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> None: ... +# keep in sync with `ndarray.swapaxes` and `ma.core.swapaxes` @overload -def swapaxes( - a: _ArrayLike[_ScalarT], - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[_ScalarT]: ... +def swapaxes[ArrayT: np.ndarray](a: ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> ArrayT: ... @overload -def swapaxes( - a: ArrayLike, - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[Any]: ... +def swapaxes[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... @overload -def transpose( - a: _ArrayLike[_ScalarT], - axes: _ShapeLike | None = ... -) -> NDArray[_ScalarT]: ... +def transpose[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axes: _ShapeLike | None = None, +) -> NDArray[ScalarT]: ... @overload def transpose( a: ArrayLike, - axes: _ShapeLike | None = ... + axes: _ShapeLike | None = None, ) -> NDArray[Any]: ... @overload -def matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def matrix_transpose[ScalarT: np.generic](x: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... # @overload -def partition( - a: _ArrayLike[_ScalarT], +def partition[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], kth: _ArrayLikeInt, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
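# Sketch (not part of the diff) of why the first `swapaxes` overload above
# is now bound to `ArrayT: np.ndarray`: an ndarray subclass passed in is
# returned as that same subclass, so the annotation should preserve it.
import numpy as np

m = np.ma.masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
s = np.swapaxes(m, 0, 1)
print(type(s).__name__)  # MaskedArray -- the subclass survives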
@overload def partition( a: _ArrayLike[np.void], @@ -373,192 +362,188 @@ def argpartition( # @overload -def sort( - a: _ArrayLike[_ScalarT], - axis: SupportsIndex | None = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., +def sort[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., -) -> NDArray[_ScalarT]: ... + stable: bool | None = None, +) -> NDArray[ScalarT]: ... @overload def sort( a: ArrayLike, - axis: SupportsIndex | None = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., + stable: bool | None = None, ) -> NDArray[Any]: ... def argsort( a: ArrayLike, - axis: SupportsIndex | None = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., + stable: bool | None = None, ) -> NDArray[intp]: ... @overload def argmax( a: ArrayLike, - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... @overload def argmax( a: ArrayLike, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmax( +def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, *, - keepdims: bool = ..., -) -> _BoolOrIntArrayT: ... + keepdims: bool | _NoValueType = ..., +) -> BoolOrIntArrayT: ... @overload -def argmax( +def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - out: _BoolOrIntArrayT, - keepdims: bool = ..., -) -> _BoolOrIntArrayT: ... + out: BoolOrIntArrayT, + keepdims: bool | _NoValueType = ..., +) -> BoolOrIntArrayT: ... @overload def argmin( a: ArrayLike, - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... @overload def argmin( a: ArrayLike, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmin( +def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, *, - keepdims: bool = ..., -) -> _BoolOrIntArrayT: ... + keepdims: bool | _NoValueType = ..., +) -> BoolOrIntArrayT: ... @overload -def argmin( +def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - out: _BoolOrIntArrayT, - keepdims: bool = ..., -) -> _BoolOrIntArrayT: ... + out: BoolOrIntArrayT, + keepdims: bool | _NoValueType = ..., +) -> BoolOrIntArrayT: ... 
+# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def searchsorted( a: ArrayLike, v: _ScalarLike_co, - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., # 1D int array + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> intp: ... @overload def searchsorted( a: ArrayLike, v: ArrayLike, - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., # 1D int array + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... # @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Array1D[ScalarT]: ... @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +def resize[ScalarT: np.generic, AnyShapeT: (_0D, _1D, _2D, _3D, _4D)]( + a: _ArrayLike[ScalarT], + new_shape: AnyShapeT, +) -> np.ndarray[AnyShapeT, np.dtype[ScalarT]]: ... @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ... +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> NDArray[ScalarT]: ... @overload -def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[_1D]: ... @overload -def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ... +def resize[AnyShapeT: (_0D, _1D, _2D, _3D, _4D)](a: ArrayLike, new_shape: AnyShapeT) -> np.ndarray[AnyShapeT]: ... @overload def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def squeeze( - a: _ScalarT, - axis: _ShapeLike | None = ..., -) -> _ScalarT: ... +def squeeze[ScalarT: np.generic](a: ScalarT, axis: _ShapeLike | None = None) -> ScalarT: ... @overload -def squeeze( - a: _ArrayLike[_ScalarT], - axis: _ShapeLike | None = ..., -) -> NDArray[_ScalarT]: ... +def squeeze[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload -def squeeze( - a: ArrayLike, - axis: _ShapeLike | None = ..., -) -> NDArray[Any]: ... +def squeeze(a: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... +# keep in sync with `ma.core.diagonal` @overload -def diagonal( - a: _ArrayLike[_ScalarT], - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., # >= 2D array -) -> NDArray[_ScalarT]: ... +def diagonal[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array +) -> NDArray[ScalarT]: ... @overload def diagonal( a: ArrayLike, - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., # >= 2D array + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array ) -> NDArray[Any]: ... 
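# A short illustration (not part of the diff) of the constrained
# `AnyShapeT: (_0D, _1D, _2D, _3D, _4D)` pattern in `resize` above: when
# `new_shape` is a tuple of known arity, the checker can resolve the exact
# shape type of the result, up to four dimensions.
import numpy as np

r = np.resize(np.arange(4), (2, 2))  # a 2-tuple, so statically tuple[int, int]
print(r.shape)  # (2, 2)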
+# keep in sync with `ma.core.trace`
 @overload
 def trace(
     a: ArrayLike,  # >= 2D array
-    offset: SupportsIndex = ...,
-    axis1: SupportsIndex = ...,
-    axis2: SupportsIndex = ...,
-    dtype: DTypeLike = ...,
-    out: None = ...,
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,
+    dtype: DTypeLike | None = None,
+    out: None = None,
 ) -> Any: ...
 @overload
-def trace(
+def trace[ArrayT: np.ndarray](
     a: ArrayLike,  # >= 2D array
     offset: SupportsIndex,
     axis1: SupportsIndex,
     axis2: SupportsIndex,
-    dtype: DTypeLike,
-    out: _ArrayT,
-) -> _ArrayT: ...
+    dtype: DTypeLike | None,
+    out: ArrayT,
+) -> ArrayT: ...
 @overload
-def trace(
+def trace[ArrayT: np.ndarray](
     a: ArrayLike,  # >= 2D array
-    offset: SupportsIndex = ...,
-    axis1: SupportsIndex = ...,
-    axis2: SupportsIndex = ...,
-    dtype: DTypeLike = ...,
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,
+    dtype: DTypeLike | None = None,
     *,
-    out: _ArrayT,
-) -> _ArrayT: ...
-
-_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]]
+    out: ArrayT,
+) -> ArrayT: ...
 
 @overload
-def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ...
+def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Array1D[ScalarT]: ...
 @overload
 def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ...
 @overload
@@ -566,262 +551,249 @@ def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np
 @overload
 def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ...
 @overload
-def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ...
+def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | Any]: ...
 @overload
-def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ...
+def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | Any]: ...
 @overload
-def ravel(
-    a: complex | _NestedSequence[complex],
-    order: _OrderKACF = "C",
-) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ...
+def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Array1D[np.complex128 | Any]: ...
 @overload
-def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ...
+def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[_1D]: ...
 
-def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ...
+def nonzero(a: _ArrayLike[Any]) -> tuple[_Array1D[np.intp], ...]: ...
 
 # this prevents `Any` from being returned with Pyright
 @overload
 def shape(a: _SupportsShape[Never]) -> _AnyShape: ...
 @overload
-def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ...
+def shape[ShapeT: _Shape](a: _SupportsShape[ShapeT]) -> ShapeT: ...
 @overload
 def shape(a: _PyScalar) -> tuple[()]: ...
 # `collections.abc.Sequence` can't be used here, since `bytes` and `str` are
 # subtypes of it, which would make the return types incompatible.
 @overload
-def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ...
+def shape(a: _PyArray[_PyScalar]) -> _1D: ...
 @overload
-def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ...
+def shape(a: _PyArray[_PyArray[_PyScalar]]) -> _2D: ...
 # this overload will be skipped by typecheckers that don't support PEP 688
 @overload
-def shape(a: memoryview | bytearray) -> tuple[int]: ...
+def shape(a: memoryview | bytearray) -> _1D: ...
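# Hedged illustration of the `shape` overloads above: for plain Python
# nesting the arity of the returned tuple is known statically, and strings
# and bytes are deliberately excluded from `_PyArray` because they are
# themselves Sequences.
import numpy as np

print(np.shape(3.0))               # () -- the `_PyScalar` overload
print(np.shape([1, 2, 3]))         # (3,) -- typed as tuple[int]
print(np.shape([[1, 2], [3, 4]]))  # (2, 2) -- typed as tuple[int, int]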
@overload def shape(a: ArrayLike) -> _AnyShape: ... @overload -def compress( +def compress[ScalarT: np.generic]( condition: _ArrayLikeBool_co, # 1D bool array - a: _ArrayLike[_ScalarT], - axis: SupportsIndex | None = ..., - out: None = ..., -) -> NDArray[_ScalarT]: ... + a: _ArrayLike[ScalarT], + axis: SupportsIndex | None = None, + out: None = None, +) -> NDArray[ScalarT]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, ) -> NDArray[Any]: ... @overload -def compress( +def compress[ArrayT: np.ndarray]( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, axis: SupportsIndex | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def compress( +def compress[ArrayT: np.ndarray]( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def clip( - a: _ScalarT, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: None = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., -) -> _ScalarT: ... +def clip[ScalarT: np.generic]( + a: ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> ScalarT: ... @overload def clip( a: _ScalarLike_co, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: None = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload -def clip( - a: _ArrayLike[_ScalarT], - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: None = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., -) -> NDArray[_ScalarT]: ... +def clip[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> NDArray[ScalarT]: ... 
@overload def clip( a: ArrayLike, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: None = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[Any]: ... @overload -def clip( +def clip[ArrayT: np.ndarray]( a: ArrayLike, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: _ArrayT, - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: DTypeLike = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., -) -> _ArrayT: ... + out: ArrayT, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> ArrayT: ... +@overload +def clip[ArrayT: np.ndarray]( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: ArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> ArrayT: ... @overload def clip( a: ArrayLike, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: ArrayLike = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: DTypeLike, - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload -def sum( - a: _ArrayLike[_ScalarT], - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT: ... +def sum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... @overload -def sum( - a: _ArrayLike[_ScalarT], - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +def sum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT | NDArray[ScalarT]: ... 
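# The `**kwargs: Unpack[_UFuncKwargs]` pattern used in the `clip` overloads
# above replaces five repeated keyword parameters per overload. A minimal
# standalone sketch with hypothetical names (`_Kw` and `clip_like` are not
# part of NumPy):
from typing import TypedDict, Unpack

class _Kw(TypedDict, total=False):
    order: str
    subok: bool

def clip_like(a: float, lo: float, hi: float, **kwargs: Unpack[_Kw]) -> float:
    # a checker now rejects e.g. clip_like(1.0, 0.0, 2.0, order=3)
    return min(max(a, lo), hi)

print(clip_like(2.5, 0.0, 2.0, subok=True))  # 2.0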
@overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: None, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT: ... + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... @overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT: ... + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... @overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT | NDArray[ScalarT]: ... @overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT | NDArray[ScalarT]: ... @overload def sum( a: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - dtype: DTypeLike, - out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayT: ... + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - *, - out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayT: ... + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... 
+# keep in sync with `any` @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -830,7 +802,7 @@ def all( ) -> np.bool: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -838,27 +810,28 @@ def all( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def all( - a: ArrayLike, +def all[ArrayT: np.ndarray]( + a: ArrayLike | None, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def all( - a: ArrayLike, +def all[ArrayT: np.ndarray]( + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... +# keep in sync with `all` @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -867,7 +840,7 @@ def any( ) -> np.bool: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -875,584 +848,589 @@ def any( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def any( - a: ArrayLike, +def any[ArrayT: np.ndarray]( + a: ArrayLike | None, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def any( - a: ArrayLike, +def any[ArrayT: np.ndarray]( + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... +# @overload -def cumsum( - a: _ArrayLike[_ScalarT], - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., -) -> NDArray[_ScalarT]: ... +def cumsum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[Any]: ... @overload -def cumsum( +def cumsum[ScalarT: np.generic]( a: ArrayLike, axis: SupportsIndex | None, - dtype: _DTypeLike[_ScalarT], - out: None = ..., -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> NDArray[ScalarT]: ... @overload -def cumsum( +def cumsum[ScalarT: np.generic]( a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = ..., -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> NDArray[Any]: ... 
@overload -def cumsum( +def cumsum[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, - dtype: DTypeLike, - out: _ArrayT, -) -> _ArrayT: ... + dtype: DTypeLike | None, + out: ArrayT, +) -> ArrayT: ... @overload -def cumsum( +def cumsum[ArrayT: np.ndarray]( a: ArrayLike, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def cumulative_sum( - x: _ArrayLike[_ScalarT], +def cumulative_sum[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., -) -> NDArray[_ScalarT]: ... + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[ScalarT]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_sum( +def cumulative_sum[ScalarT: np.generic]( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: _DTypeLike[_ScalarT], - out: None = ..., - include_initial: bool = ..., -) -> NDArray[_ScalarT]: ... + axis: SupportsIndex | None = None, + dtype: _DTypeLike[ScalarT], + out: None = None, + include_initial: bool = False, +) -> NDArray[ScalarT]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_sum( +def cumulative_sum[ArrayT: np.ndarray]( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: _ArrayT, - include_initial: bool = ..., -) -> _ArrayT: ... + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: ArrayT, + include_initial: bool = False, +) -> ArrayT: ... @overload -def ptp( - a: _ArrayLike[_ScalarT], - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., -) -> _ScalarT: ... +def ptp[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> ScalarT: ... @overload def ptp( a: ArrayLike, - axis: _ShapeLike | None = ..., - out: None = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, - keepdims: bool = ..., -) -> _ArrayT: ... + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, - out: _ArrayT, - keepdims: bool = ..., -) -> _ArrayT: ... + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... @overload -def amax( - a: _ArrayLike[_ScalarT], - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT: ... 
+def amax[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... @overload def amax( a: ArrayLike, - axis: _ShapeLike | None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def amax( +def amax[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayT: ... + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def amax( +def amax[ArrayT: np.ndarray]( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, - out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayT: ... + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def amin( - a: _ArrayLike[_ScalarT], - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT: ... +def amin[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... @overload def amin( a: ArrayLike, - axis: _ShapeLike | None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def amin( +def amin[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayT: ... + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def amin( +def amin[ArrayT: np.ndarray]( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, - out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayT: ... + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. # The only requirement is that it is compatible # with the `.__mul__()` method(s) of the passed array's elements. - # Note that the same situation holds for all wrappers around # `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). 
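# A concrete instance (not from the diff) of the TODO note above: for
# object arrays, `initial` only needs to be `__mul__`-compatible with the
# elements rather than a numeric scalar.
from fractions import Fraction

import numpy as np

a = np.array([Fraction(1, 2), Fraction(2, 3)], dtype=object)
print(repr(np.prod(a, initial=Fraction(3, 1))))  # Fraction(1, 1)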
+# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def prod( a: _ArrayLikeBool_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int_: ... @overload def prod( a: _ArrayLikeUInt_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> uint64: ... @overload def prod( a: _ArrayLikeInt_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int64: ... @overload def prod( a: _ArrayLikeFloat_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> floating: ... @overload def prod( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> complexfloating: ... @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def prod( +def prod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT: ... + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... @overload -def prod( +def prod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ScalarT: ... 
+ dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayT: ... + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - *, - out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayT: ... + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def cumprod( a: _ArrayLikeBool_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int_]: ... @overload def cumprod( a: _ArrayLikeUInt_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[uint64]: ... @overload def cumprod( a: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int64]: ... @overload def cumprod( a: _ArrayLikeFloat_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[floating]: ... @overload def cumprod( a: _ArrayLikeComplex_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[complexfloating]: ... @overload def cumprod( a: _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[object_]: ... @overload -def cumprod( +def cumprod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, - dtype: _DTypeLike[_ScalarT], - out: None = ..., -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> NDArray[ScalarT]: ... 
@overload -def cumprod( +def cumprod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = ..., -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> NDArray[ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> NDArray[Any]: ... @overload -def cumprod( +def cumprod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, - dtype: DTypeLike, - out: _ArrayT, -) -> _ArrayT: ... + dtype: DTypeLike | None, + out: ArrayT, +) -> ArrayT: ... @overload -def cumprod( +def cumprod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def cumulative_prod( x: _ArrayLikeBool_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[int_]: ... @overload def cumulative_prod( x: _ArrayLikeUInt_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[uint64]: ... @overload def cumulative_prod( x: _ArrayLikeInt_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[int64]: ... @overload def cumulative_prod( x: _ArrayLikeFloat_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[floating]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[complexfloating]: ... @overload def cumulative_prod( x: _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[object_]: ... @overload -def cumulative_prod( +def cumulative_prod[ScalarT: np.generic]( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: _DTypeLike[_ScalarT], - out: None = ..., - include_initial: bool = ..., -) -> NDArray[_ScalarT]: ... + axis: SupportsIndex | None = None, + dtype: _DTypeLike[ScalarT], + out: None = None, + include_initial: bool = False, +) -> NDArray[ScalarT]: ... 
@overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_prod( +def cumulative_prod[ArrayT: np.ndarray]( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: _ArrayT, - include_initial: bool = ..., -) -> _ArrayT: ... + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: ArrayT, + include_initial: bool = False, +) -> ArrayT: ... def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: int | None = ...) -> int: ... +def size(a: ArrayLike, axis: int | tuple[int, ...] | None = None) -> int: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def around( a: _BoolLike_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> float16: ... @overload -def around( - a: _NumberOrObjectT, - decimals: SupportsIndex = ..., - out: None = ..., -) -> _NumberOrObjectT: ... +def around[NumberOrObjectT: np.number | np.object_]( + a: NumberOrObjectT, + decimals: SupportsIndex = 0, + out: None = None, +) -> NumberOrObjectT: ... @overload def around( a: _ComplexLike_co | object_, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> Any: ... @overload def around( a: _ArrayLikeBool_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[float16]: ... @overload -def around( - a: _ArrayLike[_NumberOrObjectT], - decimals: SupportsIndex = ..., - out: None = ..., -) -> NDArray[_NumberOrObjectT]: ... +def around[NumberOrObjectT: np.number | np.object_]( + a: _ArrayLike[NumberOrObjectT], + decimals: SupportsIndex = 0, + out: None = None, +) -> NDArray[NumberOrObjectT]: ... @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[Any]: ... @overload -def around( +def around[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def around( +def around[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., + decimals: SupportsIndex = 0, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... 
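# The first `around` overload above encodes a quirk worth spelling out:
# rounding boolean input yields float16, the smallest float type, since
# np.bool defines no rounding of its own. A quick check:
import numpy as np

print(np.around(True))                           # 1.0 as np.float16
print(np.around(np.array([True, False])).dtype)  # float16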
+# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def mean( a: _ArrayLikeFloat_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1460,9 +1438,9 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1470,89 +1448,89 @@ def mean( @overload def mean( a: _ArrayLike[np.timedelta64], - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> timedelta64: ... +) -> np.timedelta64: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, - out: _ArrayT, + dtype: DTypeLike | None, + out: ArrayT, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], - out: None = ..., + dtype: _DTypeLike[ScalarT], + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = ..., + dtype: _DTypeLike[ScalarT], + out: None = None, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None, keepdims: Literal[True, 1], *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], - out: None = ..., + dtype: _DTypeLike[ScalarT], + out: None = None, *, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = ..., + dtype: _DTypeLike[ScalarT], + out: None = None, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... 
+) -> ScalarT | NDArray[ScalarT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1561,11 +1539,11 @@ def mean( @overload def std( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., @@ -1574,90 +1552,90 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> Any: ... @overload -def std( +def std[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def std( +def std[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> Any: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, - out: _ArrayT, - ddof: float = ..., - keepdims: bool = ..., + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def std( +def std[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, - out: _ArrayT, - ddof: float = ..., - keepdims: bool = ..., + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload def var( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., @@ -1666,81 +1644,81 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> Any: ... @overload -def var( +def var[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def var( +def var[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> Any: ... 
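# The `correction` keyword threaded through the `std`/`var` overloads above
# is the array-API-compatible spelling of `ddof`; passing both at once is an
# error. A hedged sanity check:
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
assert np.std(x, ddof=1) == np.std(x, correction=1)  # same sample std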
@overload -def var( +def var[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, - out: _ArrayT, - ddof: float = ..., - keepdims: bool = ..., + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, - out: _ArrayT, - ddof: float = ..., - keepdims: bool = ..., + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... max = amax min = amin diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 12ab2a7ef546..b01ba108d2c4 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -1,4 +1,5 @@ import functools +import inspect import operator import types import warnings @@ -37,7 +38,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, .. versionchanged:: 1.20.0 Values are rounded towards ``-inf`` instead of ``0`` when an integer ``dtype`` is specified. The old behavior can - still be obtained with ``np.linspace(start, stop, num).astype(int)`` + still be obtained with ``np.linspace(start, stop, num).astype(np.int_)`` Parameters ---------- @@ -374,9 +375,9 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Note that the above may not produce exact integers: - >>> np.geomspace(1, 256, num=9, dtype=int) + >>> np.geomspace(1, 256, num=9, dtype=np.int_) array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) - >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + >>> np.around(np.geomspace(1, 256, num=9)).astype(np.int_) array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) Negative, decreasing, and complex inputs are allowed: @@ -477,6 +478,9 @@ def _add_docstring(obj, doc, warn_on_python): "Prefer to attach it directly to the source.", UserWarning, stacklevel=3) + + doc = inspect.cleandoc(doc) + try: add_docstring(obj, doc) except Exception: @@ -494,10 +498,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True): ---------- place : str The absolute name of the module to import from - obj : str or None + obj : str | None The name of the object to add documentation to, typically a class or function name. 
- doc : {str, Tuple[str, str], List[Tuple[str, str]]} + doc : str | tuple[str, str] | list[tuple[str, str]] If a string, the documentation to apply to `obj` If a tuple, then the first element is interpreted as an attribute @@ -534,12 +538,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True): if isinstance(doc, str): if "${ARRAY_FUNCTION_LIKE}" in doc: doc = overrides.get_array_function_like_doc(new, doc) - _add_docstring(new, doc.strip(), warn_on_python) + _add_docstring(new, doc, warn_on_python) elif isinstance(doc, tuple): attr, docstring = doc - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) + _add_docstring(getattr(new, attr), docstring, warn_on_python) elif isinstance(doc, list): for attr, docstring in doc: - _add_docstring( - getattr(new, attr), docstring.strip(), warn_on_python - ) + _add_docstring(getattr(new, attr), docstring, warn_on_python) diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 44d1311f5b44..060a44d416ea 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,7 +1,5 @@ -from typing import Literal as L -from typing import SupportsIndex, TypeAlias, TypeVar, overload - from _typeshed import Incomplete +from typing import Literal as L, SupportsIndex, overload import numpy as np from numpy._typing import ( @@ -15,9 +13,9 @@ from numpy._typing._array_like import _DualArrayLike __all__ = ["geomspace", "linspace", "logspace"] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +type _ToArrayFloat64 = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] -_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] +### @overload def linspace( @@ -56,29 +54,29 @@ def linspace( device: L["cpu"] | None = None, ) -> NDArray[np.complexfloating]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, retstep: L[False], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, retstep: L[False] = False, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -128,17 +126,17 @@ def linspace( device: L["cpu"] | None = None, ) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, *, retstep: L[True], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[_ScalarT], _ScalarT]: ... +) -> tuple[NDArray[ScalarT], ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -183,26 +181,26 @@ def logspace( axis: SupportsIndex = 0, ) -> NDArray[np.complexfloating]: ... 
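A note on the `inspect.cleandoc` call added to `_add_docstring` above: unlike the `str.strip()` calls it replaces, `cleandoc` also removes the common leading indentation from every line, which is what docstrings written inside indented code need. A small self-contained illustration:

```python
import inspect

doc = """
    Return the answer.

    Parameters
    ----------
    x : int
"""

# str.strip() only trims the ends; interior lines keep their indentation.
print(doc.strip().splitlines()[2])             # '    Parameters'
# inspect.cleandoc() also dedents, yielding a normalized docstring.
print(inspect.cleandoc(doc).splitlines()[2])   # 'Parameters'
```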
@overload -def logspace( +def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, base: _ArrayLikeComplex_co, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def logspace( +def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, base: _ArrayLikeComplex_co = 10.0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -242,24 +240,24 @@ def geomspace( axis: SupportsIndex = 0, ) -> NDArray[np.complexfloating]: ... @overload -def geomspace( +def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def geomspace( +def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index afa2ccebcfd2..3c03d81165fb 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -3,16 +3,15 @@ """ __all__ = ['finfo', 'iinfo'] +import math import types import warnings +from functools import cached_property from numpy._utils import set_module -from . import numeric -from . import numerictypes as ntypes -from ._machar import MachAr -from .numeric import array, inf, nan -from .umath import exp2, isnan, log10, nextafter +from . import numeric, numerictypes as ntypes +from ._multiarray_umath import _populate_finfo_constants def _fr0(a): @@ -31,96 +30,6 @@ def _fr1(a): return a -class MachArLike: - """ Object to simulate MachAr instance """ - def __init__(self, ftype, *, eps, epsneg, huge, tiny, - ibeta, smallest_subnormal=None, **kwargs): - self.params = _MACHAR_PARAMS[ftype] - self.ftype = ftype - self.title = self.params['title'] - # Parameter types same as for discovered MachAr object. - if not smallest_subnormal: - self._smallest_subnormal = nextafter( - self.ftype(0), self.ftype(1), dtype=self.ftype) - else: - self._smallest_subnormal = smallest_subnormal - self.epsilon = self.eps = self._float_to_float(eps) - self.epsneg = self._float_to_float(epsneg) - self.xmax = self.huge = self._float_to_float(huge) - self.xmin = self._float_to_float(tiny) - self.smallest_normal = self.tiny = self._float_to_float(tiny) - self.ibeta = self.params['itype'](ibeta) - self.__dict__.update(kwargs) - self.precision = int(-log10(self.eps)) - self.resolution = self._float_to_float( - self._float_conv(10) ** (-self.precision)) - self._str_eps = self._float_to_str(self.eps) - self._str_epsneg = self._float_to_str(self.epsneg) - self._str_xmin = self._float_to_str(self.xmin) - self._str_xmax = self._float_to_str(self.xmax) - self._str_resolution = self._float_to_str(self.resolution) - self._str_smallest_normal = self._float_to_str(self.xmin) - - @property - def smallest_subnormal(self): - """Return the value for the smallest subnormal. 
- - Returns - ------- - smallest_subnormal : float - value for the smallest subnormal. - - Warns - ----- - UserWarning - If the calculated value for the smallest subnormal is zero. - """ - # Check that the calculated value is not zero, in case it raises a - # warning. - value = self._smallest_subnormal - if self.ftype(0) == value: - warnings.warn( - f'The value of the smallest subnormal for {self.ftype} type is zero.', - UserWarning, stacklevel=2) - - return self._float_to_float(value) - - @property - def _str_smallest_subnormal(self): - """Return the string representation of the smallest subnormal.""" - return self._float_to_str(self.smallest_subnormal) - - def _float_to_float(self, value): - """Converts float to float. - - Parameters - ---------- - value : float - value to be converted. - """ - return _fr1(self._float_conv(value)) - - def _float_conv(self, value): - """Converts float to conv. - - Parameters - ---------- - value : float - value to be converted. - """ - return array([value], self.ftype) - - def _float_to_str(self, value): - """Converts float to str. - - Parameters - ---------- - value : float - value to be converted. - """ - return self.params['fmt'] % array(_fr0(value)[0], self.ftype) - - _convert_to_float = { ntypes.csingle: ntypes.single, ntypes.complex128: ntypes.float64, @@ -147,240 +56,6 @@ def _float_to_str(self, value): 'fmt': '%12.5e', 'title': _title_fmt.format('half')}} -# Key to identify the floating point type. Key is result of -# -# ftype = np.longdouble # or float64, float32, etc. -# v = (ftype(-1.0) / ftype(10.0)) -# v.view(v.dtype.newbyteorder('<')).tobytes() -# -# Uses division to work around deficiencies in strtold on some platforms. -# See: -# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure - -_KNOWN_TYPES = {} -def _register_type(machar, bytepat): - _KNOWN_TYPES[bytepat] = machar - - -_float_ma = {} - - -def _register_known_types(): - # Known parameters for float16 - # See docstring of MachAr class for description of parameters. 
- f16 = ntypes.float16 - float16_ma = MachArLike(f16, - machep=-10, - negep=-11, - minexp=-14, - maxexp=16, - it=10, - iexp=5, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f16(-10)), - epsneg=exp2(f16(-11)), - huge=f16(65504), - tiny=f16(2 ** -14)) - _register_type(float16_ma, b'f\xae') - _float_ma[16] = float16_ma - - # Known parameters for float32 - f32 = ntypes.float32 - float32_ma = MachArLike(f32, - machep=-23, - negep=-24, - minexp=-126, - maxexp=128, - it=23, - iexp=8, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f32(-23)), - epsneg=exp2(f32(-24)), - huge=f32((1 - 2 ** -24) * 2**128), - tiny=exp2(f32(-126))) - _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') - _float_ma[32] = float32_ma - - # Known parameters for float64 - f64 = ntypes.float64 - epsneg_f64 = 2.0 ** -53.0 - tiny_f64 = 2.0 ** -1022.0 - float64_ma = MachArLike(f64, - machep=-52, - negep=-53, - minexp=-1022, - maxexp=1024, - it=52, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=2.0 ** -52.0, - epsneg=epsneg_f64, - huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), - tiny=tiny_f64) - _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') - _float_ma[64] = float64_ma - - # Known parameters for IEEE 754 128-bit binary float - ld = ntypes.longdouble - epsneg_f128 = exp2(ld(-113)) - tiny_f128 = exp2(ld(-16382)) - # Ignore runtime error when this is not f128 - with numeric.errstate(all='ignore'): - huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) - float128_ma = MachArLike(ld, - machep=-112, - negep=-113, - minexp=-16382, - maxexp=16384, - it=112, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-112)), - epsneg=epsneg_f128, - huge=huge_f128, - tiny=tiny_f128) - # IEEE 754 128-bit binary float - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _float_ma[128] = float128_ma - - # Known parameters for float80 (Intel 80-bit extended precision) - epsneg_f80 = exp2(ld(-64)) - tiny_f80 = exp2(ld(-16382)) - # Ignore runtime error when this is not f80 - with numeric.errstate(all='ignore'): - huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) - float80_ma = MachArLike(ld, - machep=-63, - negep=-64, - minexp=-16382, - maxexp=16384, - it=63, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-63)), - epsneg=epsneg_f80, - huge=huge_f80, - tiny=tiny_f80) - # float80, first 10 bytes containing actual storage - _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') - _float_ma[80] = float80_ma - - # Guessed / known parameters for double double; see: - # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic - # These numbers have the same exponent range as float64, but extended - # number of digits in the significand. - huge_dd = nextafter(ld(inf), ld(0), dtype=ld) - # As the smallest_normal in double double is so hard to calculate we set - # it to NaN. - smallest_normal_dd = nan - # Leave the same value for the smallest subnormal as double - smallest_subnormal_dd = ld(nextafter(0., 1.)) - float_dd_ma = MachArLike(ld, - machep=-105, - negep=-106, - minexp=-1022, - maxexp=1024, - it=105, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-105)), - epsneg=exp2(ld(-106)), - huge=huge_dd, - tiny=smallest_normal_dd, - smallest_subnormal=smallest_subnormal_dd) - # double double; low, high order (e.g. PPC 64) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') - # double double; high, low order (e.g. 
PPC 64 le) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') - _float_ma['dd'] = float_dd_ma - - -def _get_machar(ftype): - """ Get MachAr instance or MachAr-like instance - - Get parameters for floating point type, by first trying signatures of - various known floating point types, then, if none match, attempting to - identify parameters by analysis. - - Parameters - ---------- - ftype : class - Numpy floating point type class (e.g. ``np.float64``) - - Returns - ------- - ma_like : instance of :class:`MachAr` or :class:`MachArLike` - Object giving floating point parameters for `ftype`. - - Warns - ----- - UserWarning - If the binary signature of the float type is not in the dictionary of - known float types. - """ - params = _MACHAR_PARAMS.get(ftype) - if params is None: - raise ValueError(repr(ftype)) - # Detect known / suspected types - # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold - # may be deficient - key = (ftype(-1.0) / ftype(10.)) - key = key.view(key.dtype.newbyteorder("<")).tobytes() - ma_like = None - if ftype == ntypes.longdouble: - # Could be 80 bit == 10 byte extended precision, where last bytes can - # be random garbage. - # Comparing first 10 bytes to pattern first to avoid branching on the - # random garbage. - ma_like = _KNOWN_TYPES.get(key[:10]) - if ma_like is None: - # see if the full key is known. - ma_like = _KNOWN_TYPES.get(key) - if ma_like is None and len(key) == 16: - # machine limits could be f80 masquerading as np.float128, - # find all keys with length 16 and make new dict, but make the keys - # only 10 bytes long, the last bytes can be random garbage - _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16} - ma_like = _kt.get(key[:10]) - if ma_like is not None: - return ma_like - # Fall back to parameter discovery - warnings.warn( - f'Signature {key} for {ftype} does not match any known type: ' - 'falling back to type probe function.\n' - 'This warnings indicates broken support for the dtype!', - UserWarning, stacklevel=2) - return _discovered_machar(ftype) - - -def _discovered_machar(ftype): - """ Create MachAr instance with found information on float types - - TODO: MachAr should be retired completely ideally. We currently only - ever use it system with broken longdouble (valgrind, WSL). - """ - params = _MACHAR_PARAMS[ftype] - return MachAr(lambda v: array([v], ftype), - lambda v: _fr0(v.astype(params['itype']))[0], - lambda v: array(_fr0(v)[0], ftype), - lambda v: params['fmt'] % array(_fr0(v)[0], ftype), - params['title']) - @set_module('numpy') class finfo: @@ -414,17 +89,20 @@ class finfo: The largest representable number. maxexp : int The smallest positive power of the base (2) that causes overflow. + Corresponds to the C standard MAX_EXP. min : floating point number of the appropriate type The smallest representable number, typically ``-max``. minexp : int The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. + being no leading 0's in the mantissa. Corresponds to the C + standard MIN_EXP - 1. negep : int The exponent that yields `epsneg`. nexp : int The number of bits in the exponent including its sign and bias. nmant : int - The number of bits in the mantissa. + The number of explicit bits in the mantissa (excluding the implicit + leading bit for normalized numbers). precision : int The approximate number of decimal digits to which this kind of float is precise. 
@@ -465,6 +143,12 @@ class finfo: fill the gap between 0 and ``smallest_normal``. However, subnormal numbers may have significantly reduced precision [2]_. + For ``longdouble``, the representation varies across platforms. On most + platforms it is IEEE 754 binary128 (quad precision) or binary64-extended + (80-bit extended precision). On PowerPC systems, it may use the IBM + double-double format (a pair of float64 values), which has special + characteristics for precision and range. + This function can also be used for complex data types as well. If used, the output will be the same as the corresponding real float type (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)). @@ -549,77 +233,107 @@ def __new__(cls, dtype): def _init(self, dtype): self.dtype = numeric.dtype(dtype) - machar = _get_machar(dtype) - - for word in ['precision', 'iexp', - 'maxexp', 'minexp', 'negep', - 'machep']: - setattr(self, word, getattr(machar, word)) - for word in ['resolution', 'epsneg', 'smallest_subnormal']: - setattr(self, word, getattr(machar, word).flat[0]) self.bits = self.dtype.itemsize * 8 - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self._machar = machar - self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - self._str_smallest_normal = machar._str_smallest_normal.strip() - self._str_smallest_subnormal = machar._str_smallest_subnormal.strip() + self._fmt = None + self._repr = None + _populate_finfo_constants(self, self.dtype) return self + @cached_property + def epsneg(self): + # Assume typical floating point logic. Could also use nextafter. 
+        return self.eps / self._radix
+
+    @cached_property
+    def resolution(self):
+        return self.dtype.type(10)**-self.precision
+
+    @cached_property
+    def machep(self):
+        return int(math.log2(self.eps))
+
+    @cached_property
+    def negep(self):
+        return int(math.log2(self.epsneg))
+
+    @cached_property
+    def nexp(self):
+        # considering all ones (inf/nan) and all zeros (subnormal/zero)
+        return math.ceil(math.log2(self.maxexp - self.minexp + 2))
+
+    @cached_property
+    def iexp(self):
+        # Calculate exponent bits from its range:
+        return math.ceil(math.log2(self.maxexp - self.minexp))
+
     def __str__(self):
+        if (fmt := getattr(self, "_fmt", None)) is not None:
+            return fmt
+
+        def get_str(name, pad=None):
+            if (val := getattr(self, name, None)) is None:
+                return ""
+            if pad is not None:
+                return str(val).ljust(pad)
+            return str(val)
+
+        precision = get_str("precision", 3)
+        machep = get_str("machep", 6)
+        negep = get_str("negep", 6)
+        minexp = get_str("minexp", 6)
+        maxexp = get_str("maxexp", 6)
+        resolution = get_str("resolution")
+        eps = get_str("eps")
+        epsneg = get_str("epsneg")
+        tiny = get_str("tiny")
+        smallest_normal = get_str("smallest_normal")
+        smallest_subnormal = get_str("smallest_subnormal")
+        nexp = get_str("nexp", 6)
+        max_ = get_str("max")
+        if hasattr(self, "min") and hasattr(self, "max") and -self.min == self.max:
+            min_ = "-max"
+        else:
+            min_ = get_str("min")
+
         fmt = (
-            'Machine parameters for %(dtype)s\n'
-            '---------------------------------------------------------------\n'
-            'precision = %(precision)3s resolution = %(_str_resolution)s\n'
-            'machep = %(machep)6s eps = %(_str_eps)s\n'
-            'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
-            'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
-            'maxexp = %(maxexp)6s max = %(_str_max)s\n'
-            'nexp = %(nexp)6s min = -max\n'
-            'smallest_normal = %(_str_smallest_normal)s '
-            'smallest_subnormal = %(_str_smallest_subnormal)s\n'
-            '---------------------------------------------------------------\n'
-        )
-        return fmt % self.__dict__
+            f'Machine parameters for {self.dtype}\n'
+            f'---------------------------------------------------------------\n'
+            f'precision = {precision} resolution = {resolution}\n'
+            f'machep = {machep} eps = {eps}\n'
+            f'negep = {negep} epsneg = {epsneg}\n'
+            f'minexp = {minexp} tiny = {tiny}\n'
+            f'maxexp = {maxexp} max = {max_}\n'
+            f'nexp = {nexp} min = {min_}\n'
+            f'smallest_normal = {smallest_normal} '
+            f'smallest_subnormal = {smallest_subnormal}\n'
+            f'---------------------------------------------------------------\n'
+        )
+        self._fmt = fmt
+        return fmt

     def __repr__(self):
+        if (repr_str := getattr(self, "_repr", None)) is not None:
+            return repr_str
+
         c = self.__class__.__name__
-        d = self.__dict__.copy()
-        d['klass'] = c
-        return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
-                 " max=%(_str_max)s, dtype=%(dtype)s)") % d)

-    @property
-    def smallest_normal(self):
-        """Return the value for the smallest normal.
+        # Use precision+1 digits in exponential notation
+        fmt_str = _MACHAR_PARAMS.get(self.dtype.type, {}).get('fmt', '%s')
+        if fmt_str != '%s' and hasattr(self, 'max') and hasattr(self, 'min'):
+            max_str = (fmt_str % self.max).strip()
+            min_str = (fmt_str % self.min).strip()
+        else:
+            max_str = str(self.max)
+            min_str = str(self.min)

-        Returns
-        -------
-        smallest_normal : float
-            Value for the smallest normal.
+        resolution_str = str(self.resolution)

-        Warns
-        -----
-        UserWarning
-            If the calculated value for the smallest normal is requested for
-            double-double.
- """ - # This check is necessary because the value for smallest_normal is - # platform dependent for longdouble types. - if isnan(self._machar.smallest_normal.flat[0]): - warnings.warn( - 'The value of smallest normal is undefined for double double', - UserWarning, stacklevel=2) - return self._machar.smallest_normal.flat[0] + repr_str = (f"{c}(resolution={resolution_str}, min={min_str}," + f" max={max_str}, dtype={self.dtype})") + self._repr = repr_str + return repr_str - @property + @cached_property def tiny(self): """Return the value for tiny, alias of smallest_normal. diff --git a/numpy/_core/getlimits.pyi b/numpy/_core/getlimits.pyi index 9d79b178f4dc..a22149ceb5c6 100644 --- a/numpy/_core/getlimits.pyi +++ b/numpy/_core/getlimits.pyi @@ -1,3 +1,124 @@ -from numpy import finfo, iinfo +from functools import cached_property +from types import GenericAlias +from typing import Final, Generic, Self, overload +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + _CLongDoubleCodes, + _Complex64Codes, + _Complex128Codes, + _DTypeLike, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntPCodes, + _LongDoubleCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, +) __all__ = ["finfo", "iinfo"] + +### + +_IntegerT_co = TypeVar("_IntegerT_co", bound=np.integer, default=np.integer, covariant=True) +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=np.floating, covariant=True) + +### + +class iinfo(Generic[_IntegerT_co]): + dtype: np.dtype[_IntegerT_co] + bits: Final[int] + kind: Final[str] + key: Final[str] + + @property + def min(self, /) -> int: ... + @property + def max(self, /) -> int: ... + + # + @overload + def __init__(self, /, int_type: _IntegerT_co | _DTypeLike[_IntegerT_co]) -> None: ... + @overload + def __init__(self: iinfo[np.int_], /, int_type: _IntPCodes | type[int] | int) -> None: ... + @overload + def __init__(self: iinfo[np.int8], /, int_type: _Int8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint8], /, int_type: _UInt8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int16], /, int_type: _Int16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint16], /, int_type: _UInt16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int32], /, int_type: _Int32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint32], /, int_type: _UInt32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int64], /, int_type: _Int64Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint64], /, int_type: _UInt64Codes) -> None: ... + @overload + def __init__(self, /, int_type: str) -> None: ... + + # + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + +class finfo(Generic[_FloatingT_co]): + dtype: np.dtype[_FloatingT_co] # readonly + eps: _FloatingT_co # readonly + _radix: _FloatingT_co # readonly + smallest_normal: _FloatingT_co # readonly + smallest_subnormal: _FloatingT_co # readonly + max: _FloatingT_co # readonly + min: _FloatingT_co # readonly + + _fmt: str | None # `__str__` cache + _repr: str | None # `__repr__` cache + + bits: Final[int] + maxexp: Final[int] + minexp: Final[int] + nmant: Final[int] + precision: Final[int] + + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + + # + @overload + def __new__(cls, dtype: _FloatingT_co | _DTypeLike[_FloatingT_co]) -> Self: ... 
+ @overload + def __new__(cls, dtype: _Float16Codes) -> finfo[np.float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes | _Complex64Codes | _DTypeLike[np.complex64]) -> finfo[np.float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes | _Complex128Codes | type[complex] | complex) -> finfo[np.float64]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | _CLongDoubleCodes | _DTypeLike[np.clongdouble]) -> finfo[np.longdouble]: ... + @overload + def __new__(cls, dtype: str) -> finfo: ... + + # + @cached_property + def epsneg(self, /) -> _FloatingT_co: ... + @cached_property + def resolution(self, /) -> _FloatingT_co: ... + @cached_property + def machep(self, /) -> int: ... + @cached_property + def negep(self, /) -> int: ... + @cached_property + def nexp(self, /) -> int: ... + @cached_property + def iexp(self, /) -> int: ... + @cached_property + def tiny(self, /) -> _FloatingT_co: ... diff --git a/numpy/_core/include/numpy/arrayscalars.h b/numpy/_core/include/numpy/arrayscalars.h index ff048061f70a..46bc58cc2a35 100644 --- a/numpy/_core/include/numpy/arrayscalars.h +++ b/numpy/_core/include/numpy/arrayscalars.h @@ -173,9 +173,11 @@ typedef struct { #define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) #define PyArrayScalar_FromLong(i) \ ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) do { \ + PyObject *obj = PyArrayScalar_FromLong(i); \ + Py_INCREF(obj); \ + return obj; \ +} while (0) #define PyArrayScalar_RETURN_FALSE \ return Py_INCREF(PyArrayScalar_False), \ PyArrayScalar_False diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index b37c9fbb6821..5ac964782ec0 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -99,6 +99,11 @@ typedef enum { } NPY_ARRAYMETHOD_FLAGS; +typedef enum { + /* Casting via same_value logic */ + NPY_SAME_VALUE_CONTEXT_FLAG=1, +} NPY_ARRAYMETHOD_CONTEXT_FLAGS; + typedef struct PyArrayMethod_Context_tag { /* The caller, which is typically the original ufunc. May be NULL */ PyObject *caller; @@ -107,7 +112,22 @@ typedef struct PyArrayMethod_Context_tag { /* Operand descriptors, filled in by resolve_descriptors */ PyArray_Descr *const *descriptors; + #if NPY_FEATURE_VERSION > NPY_2_3_API_VERSION + void * _reserved; + /* + * Optional flag to pass information into the inner loop + * NPY_ARRAYMETHOD_CONTEXT_FLAGS + */ + uint64_t flags; + + /* + * Optional run-time parameters to pass to the loop (currently used in sorting). + * Fixed parameters are expected to be passed via auxdata. + */ + void *parameters; + /* Structure may grow (this is harmless for DType authors) */ + #endif } PyArrayMethod_Context; @@ -125,6 +145,13 @@ typedef struct { } PyArrayMethod_Spec; +// This is used for the convenience function `PyUFunc_AddLoopsFromSpecs` +typedef struct { + const char *name; + PyArrayMethod_Spec *spec; +} PyUFunc_LoopSlot; + + /* * ArrayMethod slots * ----------------- @@ -144,7 +171,6 @@ typedef struct { #define NPY_METH_contiguous_indexed_loop 9 #define _NPY_METH_static_data 10 - /* * The resolve descriptors function, must be able to handle NULL values for * all output (but not input) `given_descrs` and fill `loop_descrs`. 
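`NPY_SAME_VALUE_CONTEXT_FLAG` above is the inner-loop counterpart of the `NPY_SAME_VALUE_CASTING` mode this patch adds to `ndarraytypes.h`: a cast is accepted only if every element survives unchanged. A rough Python sketch of the intended semantics (a round-trip check; the real logic lives in the C casting loops):

```python
import numpy as np

def same_value_castable(arr: np.ndarray, dtype) -> bool:
    # "Same value" casting: the otherwise-unsafe cast must round-trip
    # without altering any element.
    cast = arr.astype(dtype)
    return bool(np.array_equal(cast.astype(arr.dtype), arr))

a = np.array([1, 2, 127], dtype=np.int64)
print(same_value_castable(a, np.int8))        # True: every value fits
print(same_value_castable(a * 100, np.int8))  # False: 12700 overflows int8
```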
@@ -367,6 +393,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc,
 #define NPY_DT_get_clear_loop 9
 #define NPY_DT_get_fill_zero_loop 10
 #define NPY_DT_finalize_descr 11
+#define NPY_DT_get_constant 12

 // These PyArray_ArrFunc slots will be deprecated and replaced eventually
 // getitem and setitem can be defined as a performance optimization;
@@ -377,7 +404,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc,

 // used to separate dtype slots from arrfuncs slots
 // intended only for internal use but defined here for clarity
-#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10)
+#define _NPY_DT_ARRFUNCS_OFFSET (1 << 11)

 // Cast is disabled
 // #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET
@@ -467,6 +494,42 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype);
  */
 typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype);

+/*
+ * Constants that can be queried and used, e.g., as default reduce identities.
+ * These are also used to expose .finfo and .iinfo, for example.
+ */
+/* Numerical constants */
+#define NPY_CONSTANT_zero 1
+#define NPY_CONSTANT_one 2
+#define NPY_CONSTANT_all_bits_set 3
+#define NPY_CONSTANT_maximum_finite 4
+#define NPY_CONSTANT_minimum_finite 5
+#define NPY_CONSTANT_inf 6
+#define NPY_CONSTANT_ninf 7
+#define NPY_CONSTANT_nan 8
+#define NPY_CONSTANT_finfo_radix 9
+#define NPY_CONSTANT_finfo_eps 10
+#define NPY_CONSTANT_finfo_smallest_normal 11
+#define NPY_CONSTANT_finfo_smallest_subnormal 12
+/* Constants that are always of integer type, value is `npy_intp/Py_ssize_t` */
+#define NPY_CONSTANT_finfo_nmant ((1 << 16) + 0)
+#define NPY_CONSTANT_finfo_min_exp ((1 << 16) + 1)
+#define NPY_CONSTANT_finfo_max_exp ((1 << 16) + 2)
+#define NPY_CONSTANT_finfo_decimal_digits ((1 << 16) + 3)
+
+/* It may make sense to continue with other constants here, e.g. pi, etc? */
+
+/*
+ * Function to get a constant value for the dtype. Data may be unaligned;
+ * the function is always called with the GIL held.
+ *
+ * @param descr The dtype instance (i.e. self)
+ * @param ID The ID of the constant to get.
+ * @param data Pointer to the data to be written to, may be unaligned.
+ * @returns 1 on success, 0 if the constant is not available, or -1 with an error set.
+ */
+typedef int (PyArrayDTypeMeta_GetConstant)(PyArray_Descr *descr, int ID, void *data);
+
 /*
  * TODO: These two functions are currently only used for experimental DType
  *       API support. Their relation should be "reversed": NumPy should
@@ -477,4 +540,8 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtyp
 typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *);
 typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *);

+typedef struct {
+    NPY_SORTKIND flags;
+} PyArrayMethod_SortParameters;
+
 #endif  /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */
diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h
index baa42406ac88..f740788f3720 100644
--- a/numpy/_core/include/numpy/ndarraytypes.h
+++ b/numpy/_core/include/numpy/ndarraytypes.h
@@ -162,18 +162,37 @@ enum NPY_TYPECHAR {
 };

 /*
- * Changing this may break Numpy API compatibility
- * due to changing offsets in PyArray_ArrFuncs, so be
- * careful. Here we have reused the mergesort slot for
- * any kind of stable sort, the actual implementation will
- * depend on the data type.
+ * Changing this may break Numpy API compatibility due to changing offsets in
+ * PyArray_ArrFuncs, so be careful. 
Here we have reused the mergesort slot for + * any kind of stable sort, the actual implementation will depend on the data + * type. + * + * Updated in NumPy 2.4 + * + * Updated with new names denoting requirements rather than specifying a + * particular algorithm. All the previous values are reused in a way that + * should be downstream compatible, but the actual algorithms used may be + * different than before. The new approach should be more flexible and easier + * to update. + * + * Names with a leading underscore are private, and should only be used + * internally by NumPy. + * + * NPY_NSORTS remains the same for backwards compatibility, it should not be + * changed. */ + typedef enum { - _NPY_SORT_UNDEFINED=-1, - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2, - NPY_STABLESORT=2, + _NPY_SORT_UNDEFINED = -1, + NPY_QUICKSORT = 0, + NPY_HEAPSORT = 1, + NPY_MERGESORT = 2, + NPY_STABLESORT = 2, + // new style names + _NPY_SORT_HEAPSORT = 1, + NPY_SORT_DEFAULT = 0, + NPY_SORT_STABLE = 2, + NPY_SORT_DESCENDING = 4, } NPY_SORTKIND; #define NPY_NSORTS (NPY_STABLESORT + 1) @@ -214,6 +233,16 @@ typedef enum { NPY_KEEPORDER=2 } NPY_ORDER; +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION +/* + * check that no values overflow/change during casting + * Used explicitly only in the ArrayMethod creation or resolve_dtypes functions to + * indicate that a same-value cast is supported. In external APIs, use only + * NPY_SAME_VALUE_CASTING + */ +#define NPY_SAME_VALUE_CASTING_FLAG 64 +#endif + /* For specifying allowed casting in operations which support it */ typedef enum { _NPY_ERROR_OCCURRED_IN_CAST = -1, @@ -227,6 +256,9 @@ typedef enum { NPY_SAME_KIND_CASTING=3, /* Allow any casts */ NPY_UNSAFE_CASTING=4, +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION + NPY_SAME_VALUE_CASTING=NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG, +#endif } NPY_CASTING; typedef enum { diff --git a/numpy/_core/include/numpy/npy_3kcompat.h b/numpy/_core/include/numpy/npy_3kcompat.h index c2bf74faf09d..cd91f66268c7 100644 --- a/numpy/_core/include/numpy/npy_3kcompat.h +++ b/numpy/_core/include/numpy/npy_3kcompat.h @@ -242,7 +242,7 @@ static inline PyObject* npy_PyFile_OpenFile(PyObject *filename, const char *mode) { PyObject *open; - open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); // noqa: borrowed-ref OK if (open == NULL) { return NULL; } diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index e2556a07a3ef..5eaa29035428 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -98,11 +98,23 @@ #endif #ifdef _MSC_VER - #define NPY_FINLINE static __forceinline + #ifdef __cplusplus + #define NPY_FINLINE __forceinline + #else + #define NPY_FINLINE static __forceinline + #endif #elif defined(__GNUC__) - #define NPY_FINLINE static inline __attribute__((always_inline)) + #ifdef __cplusplus + #define NPY_FINLINE inline __attribute__((always_inline)) + #else + #define NPY_FINLINE static inline __attribute__((always_inline)) + #endif #else - #define NPY_FINLINE static + #ifdef __cplusplus + #define NPY_FINLINE inline + #else + #define NPY_FINLINE static NPY_INLINE + #endif #endif #if defined(_MSC_VER) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 4fb3fb406869..d3a29da57f36 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -20,6 +20,7 @@ * NPY_CPU_RISCV64 * NPY_CPU_RISCV32 * 
NPY_CPU_LOONGARCH + * NPY_CPU_SW_64 * NPY_CPU_WASM */ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ @@ -111,24 +112,15 @@ #endif #elif defined(__loongarch_lp64) #define NPY_CPU_LOONGARCH64 -#elif defined(__EMSCRIPTEN__) +#elif defined(__sw_64__) + #define NPY_CPU_SW_64 +#elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + /* __wasm__ is defined by clang when targeting wasm */ #define NPY_CPU_WASM #else #error Unknown CPU, please report this to numpy maintainers with \ information about your platform (OS, CPU and compiler) #endif -/* - * Except for the following architectures, memory access is limited to the natural - * alignment of data types otherwise it may lead to bus error or performance regression. - * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. -*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) - #define NPY_ALIGNMENT_REQUIRED 0 -#endif -#ifndef NPY_ALIGNMENT_REQUIRED - #define NPY_ALIGNMENT_REQUIRED 1 -#endif - #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/numpy/_core/include/numpy/npy_endian.h b/numpy/_core/include/numpy/npy_endian.h index 09262120bf82..ecb4b000763d 100644 --- a/numpy/_core/include/numpy/npy_endian.h +++ b/numpy/_core/include/numpy/npy_endian.h @@ -51,6 +51,7 @@ || defined(NPY_CPU_RISCV64) \ || defined(NPY_CPU_RISCV32) \ || defined(NPY_CPU_LOONGARCH) \ + || defined(NPY_CPU_SW_64) \ || defined(NPY_CPU_WASM) #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index ba44c28b9d0f..40b5f1454d67 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -7,12 +7,6 @@ * On Mac OS X, because there is only one configuration stage for all the archs * in universal builds, any macro which depends on the arch needs to be * hardcoded. - * - * Note that distutils/pip will attempt a universal2 build when Python itself - * is built as universal2, hence this hardcoding is needed even if we do not - * support universal2 wheels anymore (see gh-22796). - * This code block can be removed after we have dropped the setup.py based - * build completely. */ #ifdef __APPLE__ #undef NPY_SIZEOF_LONG @@ -84,6 +78,8 @@ #define NPY_2_1_API_VERSION 0x00000013 #define NPY_2_2_API_VERSION 0x00000013 #define NPY_2_3_API_VERSION 0x00000014 +#define NPY_2_4_API_VERSION 0x00000015 +#define NPY_2_5_API_VERSION 0x00000016 /* @@ -106,10 +102,11 @@ * default, or narrow it down if they wish to use newer API. If you adjust * this, consider the Python version support (example for 1.25.x): * - * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12) - * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 - * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 - * NumPy 1.15.x supports Python: ... 3.6 3.7 + * NumPy 1.26.x supports Python: 3.9 3.10 3.11 3.12 + * NumPy 1.25.x supports Python: 3.9 3.10 3.11 + * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 + * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 + * NumPy 1.15.x supports Python: ... 3.6 3.7 * * Users of the stable ABI may wish to target the last Python that is not * end of life. This would be 3.8 at NumPy 1.25 release time. 
@@ -123,8 +120,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.11 support) */ - #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION + /* Use the default (increase when dropping Python 3.12 support) */ + #define NPY_FEATURE_VERSION NPY_1_25_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -174,6 +171,10 @@ #define NPY_FEATURE_VERSION_STRING "2.1" #elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.3" +#elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.4" +#elif NPY_FEATURE_VERSION == NPY_2_5_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.5" #else #error "Missing version string define for new NumPy version." #endif diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index 8cfa7f94a8da..e0b638c6f976 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -130,7 +130,7 @@ class memmap(ndarray): Examples -------- >>> import numpy as np - >>> data = np.arange(12, dtype='float32') + >>> data = np.arange(12, dtype=np.float32) >>> data.resize((3,4)) This example uses a temporary file so that doctest doesn't write @@ -142,7 +142,7 @@ class memmap(ndarray): Create a memmap with dtype and shape that matches our data: - >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp = np.memmap(filename, dtype=np.float32, mode='w+', shape=(3,4)) >>> fp memmap([[0., 0., 0., 0.], [0., 0., 0., 0.], @@ -165,7 +165,7 @@ class memmap(ndarray): Load the memmap and verify data was stored: - >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> newfp memmap([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], @@ -173,13 +173,13 @@ class memmap(ndarray): Read-only memmap: - >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> fpr.flags.writeable False Copy-on-write memmap: - >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc = np.memmap(filename, dtype=np.float32, mode='c', shape=(3,4)) >>> fpc.flags.writeable True @@ -205,7 +205,7 @@ class memmap(ndarray): Offset into a memmap: - >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo = np.memmap(filename, dtype=np.float32, mode='r', offset=16) >>> fpo memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index a4d2050122c6..aa4da9c11146 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -50,7 +50,9 @@ C_ABI_VERSION = '0x02000000' # 0x00000013 - 2.1.x # 0x00000013 - 2.2.x # 0x00000014 - 2.3.x -C_API_VERSION = '0x00000014' +# 0x00000015 - 2.4.x +# 0x00000015 - 2.5.x +C_API_VERSION = '0x00000015' # Check whether we have a mismatch between the set C API VERSION and the # actual C API VERSION. Will raise a MismatchCAPIError if so. 
@@ -89,7 +91,7 @@ cpu_family = host_machine.cpu_family() use_svml = ( host_machine.system() == 'linux' and cpu_family == 'x86_64' and - ('AVX512_SKX' in CPU_DISPATCH_NAMES or 'AVX512_SKX' in CPU_BASELINE_NAMES) and + ('X86_V4' in CPU_DISPATCH_NAMES or 'X86_V4' in CPU_BASELINE_NAMES) and not get_option('disable-svml') ) if use_svml @@ -522,6 +524,7 @@ endif if longdouble_format == 'UNKNOWN' or longdouble_format == 'UNDEFINED' error('Unknown long double format of size: ' + cc.sizeof('long double').to_string()) endif +message(f'Long double format: @longdouble_format@') cdata.set10('HAVE_LDOUBLE_' + longdouble_format, true) if cc.has_header('endian.h') @@ -757,6 +760,7 @@ py.extension_module('_multiarray_tests', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', 'src/common/npy_hashtable.cpp', + 'src/common/npy_import.c', src_file.process('src/common/templ_common.h.src') ], c_args: c_args_common, @@ -773,10 +777,10 @@ _umath_tests_mtargets = mod_features.multi_targets( '_umath_tests.dispatch.h', 'src/umath/_umath_tests.dispatch.c', dispatch: [ - AVX2, SSE41, SSE2, + X86_V3, X86_V2, ASIMDHP, ASIMD, NEON, VSX3, VSX2, VSX, - VXE, VX, + VXE, VX, RVV, ], baseline: CPU_BASELINE, prefix: 'NPY_', @@ -818,7 +822,7 @@ foreach gen_mtargets : [ 'argfunc.dispatch.h', src_file.process('src/multiarray/argfunc.dispatch.c.src'), [ - AVX512_SKX, AVX2, XOP, SSE42, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, ASIMD, NEON, VXE, VX @@ -855,17 +859,19 @@ foreach gen_mtargets : [ [ 'x86_simd_argsort.dispatch.h', 'src/npysort/x86_simd_argsort.dispatch.cpp', - use_intel_sort ? [AVX512_SKX, AVX2] : [] + use_intel_sort ? [X86_V4, X86_V3] : [] ], [ 'x86_simd_qsort.dispatch.h', 'src/npysort/x86_simd_qsort.dispatch.cpp', - use_intel_sort ? [AVX512_SKX, AVX2] : [] + use_intel_sort ? [X86_V4, X86_V3] : [] ], [ 'x86_simd_qsort_16bit.dispatch.h', 'src/npysort/x86_simd_qsort_16bit.dispatch.cpp', - use_intel_sort ? [AVX512_SPR, AVX512_ICL] : [] + # Do not enable AVX-512 on MSVC 32-bit (x86): it’s buggy there; + # Ref: NumPy issue numpy/numpy#29808 + use_intel_sort and not (cc.get_id() == 'msvc' and cpu_family == 'x86') ? 
[AVX512_SPR, AVX512_ICL] : [] ], [ 'highway_qsort.dispatch.h', @@ -922,7 +928,7 @@ foreach gen_mtargets : [ 'loops_arithm_fp.dispatch.h', src_file.process('src/umath/loops_arithm_fp.dispatch.c.src'), [ - [AVX2, FMA3], SSE2, + X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, @@ -933,7 +939,7 @@ foreach gen_mtargets : [ 'loops_arithmetic.dispatch.h', src_file.process('src/umath/loops_arithmetic.dispatch.c.src'), [ - AVX512_SKX, AVX512F, AVX2, SSE41, SSE2, + X86_V4, X86_V3, X86_V2, NEON, VSX4, VSX2, VX, @@ -944,7 +950,7 @@ foreach gen_mtargets : [ 'loops_comparison.dispatch.h', src_file.process('src/umath/loops_comparison.dispatch.c.src'), [ - AVX512_SKX, AVX512F, AVX2, SSE42, SSE2, + X86_V4, X86_V3, X86_V2, VSX3, VSX2, NEON, VXE, VX, @@ -955,14 +961,14 @@ foreach gen_mtargets : [ 'loops_exponent_log.dispatch.h', src_file.process('src/umath/loops_exponent_log.dispatch.c.src'), [ - AVX512_SKX, AVX512F, [AVX2, FMA3] + X86_V4, X86_V3, ] ], [ 'loops_hyperbolic.dispatch.h', src_file.process('src/umath/loops_hyperbolic.dispatch.cpp.src'), [ - AVX512_SKX, [AVX2, FMA3], + X86_V4, X86_V3, VSX4, VSX2, NEON_VFPV4, VXE, @@ -974,7 +980,7 @@ foreach gen_mtargets : [ 'src/umath/loops_logical.dispatch.cpp', [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VX, LSX, @@ -986,7 +992,7 @@ foreach gen_mtargets : [ src_file.process('src/umath/loops_minmax.dispatch.c.src'), [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VXE, VX, LSX, @@ -1003,7 +1009,7 @@ foreach gen_mtargets : [ 'loops_trigonometric.dispatch.h', 'src/umath/loops_trigonometric.dispatch.cpp', [ - AVX512_SKX, [AVX2, FMA3], + X86_V4, X86_V3, VSX4, VSX3, VSX2, NEON_VFPV4, VXE2, VXE, @@ -1013,14 +1019,14 @@ foreach gen_mtargets : [ [ 'loops_umath_fp.dispatch.h', src_file.process('src/umath/loops_umath_fp.dispatch.c.src'), - [AVX512_SKX] + [X86_V4] ], [ 'loops_unary.dispatch.h', src_file.process('src/umath/loops_unary.dispatch.c.src'), [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VXE, VX, LSX, @@ -1030,7 +1036,7 @@ foreach gen_mtargets : [ 'loops_unary_fp.dispatch.h', src_file.process('src/umath/loops_unary_fp.dispatch.c.src'), [ - SSE41, SSE2, + X86_V2, VSX2, ASIMD, NEON, VXE, VX, @@ -1041,7 +1047,7 @@ foreach gen_mtargets : [ 'loops_unary_fp_le.dispatch.h', src_file.process('src/umath/loops_unary_fp_le.dispatch.c.src'), [ - SSE41, SSE2, + X86_V2, VSX2, ASIMD, NEON, LSX, @@ -1051,7 +1057,7 @@ foreach gen_mtargets : [ 'loops_unary_complex.dispatch.h', src_file.process('src/umath/loops_unary_complex.dispatch.c.src'), [ - AVX512F, [AVX2, FMA3], SSE2, + X86_V4, X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, @@ -1062,17 +1068,18 @@ foreach gen_mtargets : [ 'loops_autovec.dispatch.h', src_file.process('src/umath/loops_autovec.dispatch.c.src'), [ - AVX2, SSE2, + X86_V3, X86_V2, NEON, VSX2, VX, LSX, + RVV ] ], [ 'loops_half.dispatch.h', src_file.process('src/umath/loops_half.dispatch.c.src'), - [AVX512_SPR, AVX512_SKX] + [AVX512_SPR, X86_V4] ], ] mtargets = mod_features.multi_targets( @@ -1103,6 +1110,7 @@ endforeach # ------------------------------ src_multiarray_umath_common = [ 'src/common/array_assign.c', + 'src/common/blas_utils.c', 'src/common/gil_utils.c', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', @@ -1113,6 +1121,7 @@ src_multiarray_umath_common = [ 'src/common/numpyos.c', 'src/common/npy_cpu_features.c', 'src/common/npy_cpu_dispatch.c', + 'src/common/npy_sort.c', src_file.process('src/common/templ_common.h.src') ] if have_blas @@ -1154,7 +1163,7 @@ 
src_multiarray = multiarray_gen_headers + [ 'src/multiarray/dragon4.c', 'src/multiarray/dtype_transfer.c', 'src/multiarray/dtype_traversal.c', - src_file.process('src/multiarray/einsum.c.src'), + 'src/multiarray/einsum.cpp', src_file.process('src/multiarray/einsum_sumprod.c.src'), 'src/multiarray/public_dtype_api.c', 'src/multiarray/flagsobject.c', @@ -1206,6 +1215,7 @@ src_multiarray = multiarray_gen_headers + [ # Remove this `arm64_exports.c` file once scipy macos arm64 build correctly # links to the arm64 npymath library, see gh-22673 'src/npymath/arm64_exports.c', + 'src/multiarray/fnv.c', ] src_umath = umath_gen_headers + [ @@ -1274,6 +1284,7 @@ unique_hash_so = static_library( include_directories: [ 'include', 'src/common', + 'src/npymath', ], dependencies: [ py_dep, @@ -1385,8 +1396,6 @@ python_sources = [ '_exceptions.pyi', '_internal.py', '_internal.pyi', - '_machar.py', - '_machar.pyi', '_methods.py', '_methods.pyi', '_simd.pyi', @@ -1396,6 +1405,7 @@ python_sources = [ '_type_aliases.pyi', '_ufunc_config.py', '_ufunc_config.pyi', + '_umath_tests.pyi', 'arrayprint.py', 'arrayprint.pyi', 'cversions.py', diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 236ca7e7c9aa..54d240c89e3e 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -114,11 +114,20 @@ def _override___module__(): @array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) def empty_like( - prototype, dtype=None, order=None, subok=None, shape=None, *, device=None + prototype, dtype=None, order="K", subok=True, shape=None, *, device=None ): """ - empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *, - device=None) + empty_like( + prototype, + /, + dtype=None, + order='K', + subok=True, + shape=None, + *, + device=None, + ) + -- Return a new array with the same shape and type as a given array. @@ -186,15 +195,18 @@ def empty_like( @array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) -def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): +def concatenate(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): """ concatenate( - (a1, a2, ...), + arrays, + /, axis=0, out=None, + *, dtype=None, - casting="same_kind" + casting="same_kind", ) + -- Join a sequence of arrays along an existing axis. 
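The dispatcher signatures above now spell out the real defaults (`axis=0`, `casting="same_kind"`) instead of `None` placeholders. That matters beyond documentation: the dispatcher is what Python-level introspection sees for these C-implemented functions, so its defaults should match the documented ones. A quick check (the exact rendering depends on how the `__array_function__` wrapper exposes the signature):

```python
import inspect

import numpy as np

# Should report the documented defaults, e.g. axis=0 and casting='same_kind'.
print(inspect.signature(np.concatenate))
```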
@@ -295,7 +307,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) -def inner(a, b): +def inner(a, b, /): """ inner(a, b, /) @@ -389,7 +401,7 @@ def inner(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.where) -def where(condition, x=None, y=None): +def where(condition, x=None, y=None, /): """ where(condition, [x, y], /) @@ -465,7 +477,7 @@ def where(condition, x=None, y=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) -def lexsort(keys, axis=None): +def lexsort(keys, axis=-1): """ lexsort(keys, axis=-1) @@ -586,7 +598,7 @@ def lexsort(keys, axis=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) -def can_cast(from_, to, casting=None): +def can_cast(from_, to, casting="safe"): """ can_cast(from_, to, casting='safe') @@ -648,7 +660,7 @@ def can_cast(from_, to, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) -def min_scalar_type(a): +def min_scalar_type(a, /): """ min_scalar_type(a, /) @@ -688,7 +700,7 @@ def min_scalar_type(a): >>> np.min_scalar_type(1e50) dtype('float64') - >>> np.min_scalar_type(np.arange(4,dtype='f8')) + >>> np.min_scalar_type(np.arange(4, dtype=np.float64)) dtype('float64') """ @@ -701,19 +713,7 @@ def result_type(*arrays_and_dtypes): result_type(*arrays_and_dtypes) Returns the type that results from applying the NumPy - type promotion rules to the arguments. - - Type promotion in NumPy works similarly to the rules in languages - like C++, with some slight differences. When both scalars and - arrays are used, the array's type takes precedence and the actual value - of the scalar is taken into account. - - For example, calculating 3*a, where a is an array of 32-bit floats, - intuitively should result in a 32-bit float output. If the 3 is a - 32-bit integer, the NumPy rules indicate it can't convert losslessly - into a 32-bit float, so a 64-bit float should be the result type. - By examining the value of the constant, '3', we see that it fits in - an 8-bit integer, which can be cast losslessly into the 32-bit float. + :ref:`type promotion ` rules to the arguments. Parameters ---------- @@ -729,34 +729,13 @@ def result_type(*arrays_and_dtypes): -------- dtype, promote_types, min_scalar_type, can_cast - Notes - ----- - The specific algorithm used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. - - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :func:`promote_types` - to produce the return value. - - Otherwise, `min_scalar_type` is called on each scalar, and - the resulting data types are all combined with :func:`promote_types` - to produce the return value. - - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :func:`min_scalar_type`, but handled as a special case in `result_type`. 
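The Notes deleted above described the old value-based promotion; under the NEP 50 rules that the `:ref:` link now points to, only the *type* of a Python scalar participates in promotion, never its value. A short illustration of the current behavior:

```python
import numpy as np

# A Python int adapts to the array's dtype instead of forcing an upcast:
print(np.result_type(3, np.arange(7, dtype=np.int8)))  # int8
# Promotion between NumPy dtypes is value-independent:
print(np.result_type(np.int32, np.complex64))          # complex128
```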
- Examples -------- >>> import numpy as np - >>> np.result_type(3, np.arange(7, dtype='i1')) + >>> np.result_type(3, np.arange(7, dtype=np.int8)) dtype('int8') - >>> np.result_type('i4', 'c8') + >>> np.result_type(np.int32, np.complex64) dtype('complex128') >>> np.result_type(3.0, -2) @@ -862,7 +841,7 @@ def dot(a, b, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) -def vdot(a, b): +def vdot(a, b, /): r""" vdot(a, b, /) @@ -925,7 +904,7 @@ def vdot(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) -def bincount(x, weights=None, minlength=None): +def bincount(x, /, weights=None, minlength=0): """ bincount(x, /, weights=None, minlength=0) @@ -982,7 +961,7 @@ def bincount(x, weights=None, minlength=None): The input array needs to be of integer dtype, otherwise a TypeError is raised: - >>> np.bincount(np.arange(5, dtype=float)) + >>> np.bincount(np.arange(5, dtype=np.float64)) Traceback (most recent call last): ... TypeError: Cannot cast array data from dtype('float64') to dtype('int64') @@ -1001,7 +980,7 @@ def bincount(x, weights=None, minlength=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) -def ravel_multi_index(multi_index, dims, mode=None, order=None): +def ravel_multi_index(multi_index, dims, mode="raise", order="C"): """ ravel_multi_index(multi_index, dims, mode='raise', order='C') @@ -1059,7 +1038,7 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) -def unravel_index(indices, shape=None, order=None): +def unravel_index(indices, shape, order="C"): """ unravel_index(indices, shape, order='C') @@ -1104,7 +1083,7 @@ def unravel_index(indices, shape=None, order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) -def copyto(dst, src, casting=None, where=None): +def copyto(dst, src, casting="same_kind", where=True): """ copyto(dst, src, casting='same_kind', where=True) @@ -1156,7 +1135,7 @@ def copyto(dst, src, casting=None, where=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) def putmask(a, /, mask, values): """ - putmask(a, mask, values) + putmask(a, /, mask, values) Changes elements of an array based on conditional and input values. @@ -1200,7 +1179,7 @@ def putmask(a, /, mask, values): @array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) -def packbits(a, axis=None, bitorder='big'): +def packbits(a, /, axis=None, bitorder="big"): """ packbits(a, /, axis=None, bitorder='big') @@ -1257,7 +1236,7 @@ def packbits(a, axis=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) -def unpackbits(a, axis=None, count=None, bitorder='big'): +def unpackbits(a, /, axis=None, count=None, bitorder="big"): """ unpackbits(a, /, axis=None, count=None, bitorder='big') @@ -1337,9 +1316,9 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) -def shares_memory(a, b, max_work=None): +def shares_memory(a, b, /, max_work=-1): """ - shares_memory(a, b, /, max_work=None) + shares_memory(a, b, /, max_work=-1) Determine if two arrays share memory. 
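The `max_work` defaults made explicit above encode two different contracts: `shares_memory` defaults to an exact (worst-case exponential) overlap check, while `may_share_memory` defaults to a cheap bounds comparison that can report false positives. For example:

```python
import numpy as np

a = np.arange(8)
even, odd = a[::2], a[1::2]

print(np.shares_memory(a, even))       # True: exact check (max_work=-1)
print(np.shares_memory(even, odd))     # False: interleaved but disjoint
print(np.may_share_memory(even, odd))  # True: bounds overlap (max_work=0)
```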
@@ -1416,9 +1395,9 @@ def shares_memory(a, b, max_work=None):


 @array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
-def may_share_memory(a, b, max_work=None):
+def may_share_memory(a, b, /, max_work=0):
     """
-    may_share_memory(a, b, /, max_work=None)
+    may_share_memory(a, b, /, max_work=0)

     Determine if two arrays might share memory

@@ -1458,14 +1437,14 @@ def may_share_memory(a, b, max_work=None):


 @array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
-def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
+def is_busday(dates, weekmask="1111100", holidays=None, busdaycal=None, out=None):
     """
     is_busday(
         dates,
         weekmask='1111100',
         holidays=None,
         busdaycal=None,
-        out=None
+        out=None,
     )

     Calculates which of the given dates are valid days, and which are not.

@@ -1517,7 +1496,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):


 @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
-def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
+def busday_offset(dates, offsets, roll="raise", weekmask="1111100", holidays=None,
                   busdaycal=None, out=None):
     """
     busday_offset(
@@ -1527,7 +1506,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
         weekmask='1111100',
         holidays=None,
         busdaycal=None,
-        out=None
+        out=None,
     )

     First adjusts the date to fall on a valid day according to
@@ -1619,7 +1598,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,


 @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
-def busday_count(begindates, enddates, weekmask=None, holidays=None,
+def busday_count(begindates, enddates, weekmask="1111100", holidays=(),
                  busdaycal=None, out=None):
     """
     busday_count(
@@ -1692,9 +1671,8 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None,
     return (begindates, enddates, weekmask, holidays, out)


-@array_function_from_c_func_and_dispatcher(
-    _multiarray_umath.datetime_as_string)
-def datetime_as_string(arr, unit=None, timezone=None, casting=None):
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.datetime_as_string)
+def datetime_as_string(arr, unit=None, timezone="naive", casting="same_kind"):
     """
     datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')

@@ -1723,7 +1701,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None):
     Examples
     --------
     >>> import numpy as np
-    >>> import pytz
+    >>> from zoneinfo import ZoneInfo
     >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
     >>> d
     array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
            '2002-10-27T07:30'], dtype='datetime64[m]')

     Setting the timezone to UTC shows the same information, but with a Z suffix

     >>> np.datetime_as_string(d, timezone='UTC')
     array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
            '2002-10-27T07:30Z'], dtype='<U35')

     Note that we picked datetimes during a DST boundary and so the
     UTC offset changes

-    >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
+    >>> np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern'))
     array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
            '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi
--- a/numpy/_core/multiarray.pyi
+++ b/numpy/_core/multiarray.pyi
@@ -32,21 +24,19 @@ from numpy import (  # type: ignore[attr-defined]
     _NDIterFlagsOp,
     _OrderCF,
     _OrderKACF,
-    _SupportsBuffer,
     _SupportsFileMethods,
     broadcast,
-    # Re-exports
     busdaycalendar,
     complexfloating,
     correlate,
     count_nonzero,
     datetime64,
     dtype,
+    einsum as c_einsum,
     flatiter,
     float64,
     floating,
     from_dlpack,
-    generic,
     int_,
     interp,
     intp,
@@ -56,21 +46,16 @@ from numpy import (  # type: ignore[attr-defined]
     signedinteger,
     str_,
     timedelta64,
-    # The rest
     ufunc,
     uint8,
unsignedinteger, vecdot, ) -from numpy import ( - einsum as c_einsum, -) from numpy._typing import ( ArrayLike, - # DTypes DTypeLike, - # Arrays NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeBytes_co, @@ -87,7 +72,6 @@ from numpy._typing import ( _IntLike_co, _NestedSequence, _ScalarLike_co, - # Shapes _Shape, _ShapeLike, _SupportsArrayFunc, @@ -101,7 +85,6 @@ from numpy._typing._ufunc import ( _PyFunc_Nin2_Nout1, _PyFunc_Nin3P_Nout1, ) -from numpy.lib._array_utils_impl import normalize_axis_index __all__ = [ "_ARRAY_API", @@ -195,25 +178,11 @@ __all__ = [ "zeros", ] -_ScalarT = TypeVar("_ScalarT", bound=generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) -_ArrayT_co = TypeVar( - "_ArrayT_co", - bound=ndarray[Any, Any], - covariant=True, -) -_ReturnType = TypeVar("_ReturnType") -_IDType = TypeVar("_IDType") -_Nin = TypeVar("_Nin", bound=int) -_Nout = TypeVar("_Nout", bound=int) - -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -_Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]] -_Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] +type _Array[ShapeT: _Shape, ScalarT: np.generic] = ndarray[ShapeT, dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = ndarray[tuple[int], dtype[ScalarT]] # Valid time units -_UnitKind: TypeAlias = L[ +type _UnitKind = L[ "Y", "M", "D", @@ -227,7 +196,7 @@ _UnitKind: TypeAlias = L[ "fs", "as", ] -_RollKind: TypeAlias = L[ # `raise` is deliberately excluded +type _RollKind = L[ # `raise` is deliberately excluded "nat", "forward", "following", @@ -237,14 +206,16 @@ _RollKind: TypeAlias = L[ # `raise` is deliberately excluded "modifiedpreceding", ] -@type_check_only -class _SupportsArray(Protocol[_ArrayT_co]): - def __array__(self, /) -> _ArrayT_co: ... +type _ArangeScalar = np.integer | np.floating | np.datetime64 | np.timedelta64 + +# The datetime functions perform unsafe casts to `datetime64[D]`, +# so a lot of different argument types are allowed here +type _ToDates = dt.date | _NestedSequence[dt.date] +type _ToDeltas = dt.timedelta | _NestedSequence[dt.timedelta] @type_check_only -class _KwargsEmpty(TypedDict, total=False): - device: L["cpu"] | None - like: _SupportsArrayFunc | None +class _SupportsArray[ArrayT_co: np.ndarray](Protocol): + def __array__(self, /) -> ArrayT_co: ... @type_check_only class _ConstructorEmpty(Protocol): @@ -254,110 +225,134 @@ class _ConstructorEmpty(Protocol): self, /, shape: SupportsIndex, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array1D[float64]: ... @overload - def __call__( + def __call__[DTypeT: np.dtype]( self, /, shape: SupportsIndex, - dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[tuple[int], _DTypeT]: ... + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[tuple[int], DTypeT]: ... @overload - def __call__( + def __call__[ScalarT: np.generic]( self, /, shape: SupportsIndex, - dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array1D[_ScalarT]: ... + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array1D[ScalarT]: ... 
@overload def __call__( self, /, shape: SupportsIndex, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array1D[Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array1D[Incomplete]: ... # known shape @overload - def __call__( + def __call__[ShapeT: _Shape]( self, /, - shape: _AnyShapeT, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_AnyShapeT, float64]: ... + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[ShapeT, float64]: ... @overload - def __call__( + def __call__[ShapeT: _Shape, DTypeT: np.dtype]( self, /, - shape: _AnyShapeT, - dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[_AnyShapeT, _DTypeT]: ... + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[ShapeT, DTypeT]: ... @overload - def __call__( + def __call__[ShapeT: _Shape, ScalarT: np.generic]( self, /, - shape: _AnyShapeT, - dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_AnyShapeT, _ScalarT]: ... + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[ShapeT, ScalarT]: ... @overload - def __call__( + def __call__[ShapeT: _Shape]( self, /, - shape: _AnyShapeT, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_AnyShapeT, Any]: ... + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[ShapeT, Incomplete]: ... # unknown shape @overload def __call__( self, /, shape: _ShapeLike, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload - def __call__( + def __call__[DTypeT: np.dtype]( self, /, shape: _ShapeLike, - dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[Any, _DTypeT]: ... + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[_AnyShape, DTypeT]: ... @overload - def __call__( + def __call__[ScalarT: np.generic]( self, /, shape: _ShapeLike, - dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> NDArray[_ScalarT]: ... + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> NDArray[ScalarT]: ... @overload def __call__( self, /, shape: _ShapeLike, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> NDArray[Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> NDArray[Incomplete]: ... 
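The overloads above use PEP 695 inline type parameters (`def __call__[DTypeT: np.dtype](...)`) and `type` aliases, both of which require Python 3.12+. A minimal, self-contained sketch of the pattern; the names here are illustrative, not numpy's:

    import numpy as np

    # a PEP 695 generic alias, analogous to _Array1D above
    type Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]]

    # a generic function whose type parameter is bound to np.generic
    def first[ScalarT: np.generic](arr: Array1D[ScalarT]) -> ScalarT:
        return arr[0]

    x: Array1D[np.float64] = np.zeros(3)
    y = first(x)  # a type checker infers np.float64 here

Unlike `TypeVar` declarations, these type parameters are scoped to the alias or function that declares them, which is why the module-level `_ScalarT`/`_DTypeT` variables could be deleted above.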
# using `Final` or `TypeAlias` will break stubtest error = Exception @@ -370,332 +365,315 @@ NEEDS_INIT: Final = 8 NEEDS_PYAPI: Final = 16 USE_GETITEM: Final = 32 USE_SETITEM: Final = 64 -DATETIMEUNITS: Final[CapsuleType] -_ARRAY_API: Final[CapsuleType] -_flagdict: Final[dict[str, int]] -_monotonicity: Final[Callable[..., object]] -_place: Final[Callable[..., object]] -_reconstruct: Final[Callable[..., object]] -_vec_string: Final[Callable[..., object]] -correlate2: Final[Callable[..., object]] -dragon4_positional: Final[Callable[..., object]] -dragon4_scientific: Final[Callable[..., object]] -interp_complex: Final[Callable[..., object]] -set_datetimeparse_function: Final[Callable[..., object]] +DATETIMEUNITS: Final[CapsuleType] = ... +_ARRAY_API: Final[CapsuleType] = ... + +_flagdict: Final[dict[str, int]] = ... +_monotonicity: Final[Callable[..., object]] = ... +_place: Final[Callable[..., object]] = ... +_reconstruct: Final[Callable[..., object]] = ... +_vec_string: Final[Callable[..., object]] = ... +correlate2: Final[Callable[..., object]] = ... +dragon4_positional: Final[Callable[..., object]] = ... +dragon4_scientific: Final[Callable[..., object]] = ... +interp_complex: Final[Callable[..., object]] = ... +set_datetimeparse_function: Final[Callable[..., object]] = ... + def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... def format_longfloat(x: np.longdouble, precision: int) -> str: ... -def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... +def scalar[DTypeT: np.dtype](dtype: DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], DTypeT]: ... def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... -typeinfo: Final[dict[str, np.dtype[np.generic]]] + +typeinfo: Final[dict[str, np.dtype[np.generic]]] = ... ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) -BUFSIZE: L[8192] -CLIP: L[0] -WRAP: L[1] -RAISE: L[2] -MAXDIMS: L[32] -MAY_SHARE_BOUNDS: L[0] -MAY_SHARE_EXACT: L[-1] -tracemalloc_domain: L[389047] +BUFSIZE: Final = 8_192 +CLIP: Final = 0 +WRAP: Final = 1 +RAISE: Final = 2 +MAXDIMS: Final = 64 +MAY_SHARE_BOUNDS: Final = 0 +MAY_SHARE_EXACT: Final = -1 +tracemalloc_domain: Final = 389_047 -zeros: Final[_ConstructorEmpty] -empty: Final[_ConstructorEmpty] +zeros: Final[_ConstructorEmpty] = ... +empty: Final[_ConstructorEmpty] = ... @overload -def empty_like( - prototype: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., +def empty_like[ArrayT: np.ndarray]( + prototype: ArrayT, + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> _ArrayT: ... + device: L["cpu"] | None = None, +) -> ArrayT: ... @overload -def empty_like( - prototype: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., +def empty_like[ScalarT: np.generic]( + prototype: _ArrayLike[ScalarT], + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[_ScalarT]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... 
@overload -def empty_like( - prototype: Any, - dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., +def empty_like[ScalarT: np.generic]( + prototype: Incomplete, + /, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[_ScalarT]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... @overload def empty_like( - prototype: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + prototype: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... @overload -def array( - object: _ArrayT, - dtype: None = ..., +def array[ArrayT: np.ndarray]( + object: ArrayT, + dtype: None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", subok: L[True], - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., -) -> _ArrayT: ... -@overload -def array( - object: _SupportsArray[_ArrayT], - dtype: None = ..., + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> ArrayT: ... +@overload +def array[ArrayT: np.ndarray]( + object: _SupportsArray[ArrayT], + dtype: None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", subok: L[True], - ndmin: L[0] = ..., - like: _SupportsArrayFunc | None = ..., -) -> _ArrayT: ... -@overload -def array( - object: _ArrayLike[_ScalarT], - dtype: None = ..., + ndmin: L[0] = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> ArrayT: ... +@overload +def array[ScalarT: np.generic]( + object: _ArrayLike[ScalarT], + dtype: None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... -@overload -def array( + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload +def array[ScalarT: np.generic]( object: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... @overload def array( object: Any, - dtype: DTypeLike | None = ..., + dtype: DTypeLike | None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... +# @overload -def unravel_index( # type: ignore[misc] - indices: _IntLike_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[intp, ...]: ... 
-@overload -def unravel_index( - indices: _ArrayLikeInt_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[NDArray[intp], ...]: ... - -@overload -def ravel_multi_index( # type: ignore[misc] - multi_index: Sequence[_IntLike_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., +def ravel_multi_index( + multi_index: SupportsLenAndGetItem[_IntLike_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> intp: ... @overload def ravel_multi_index( - multi_index: Sequence[_ArrayLikeInt_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., + multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> NDArray[intp]: ... +# +@overload +def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ... +@overload +def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... + +# +def normalize_axis_index(axis: int, ndim: int, msg_prefix: str | None = None) -> int: ... + # NOTE: Allow any sequence of array-like objects @overload -def concatenate( # type: ignore[misc] - arrays: _ArrayLike[_ScalarT], +def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], /, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = 0, + out: None = None, *, - dtype: None = ..., - casting: _CastingKind | None = ... -) -> NDArray[_ScalarT]: ... + dtype: None = None, + casting: _CastingKind | None = "same_kind", +) -> NDArray[ScalarT]: ... @overload +def concatenate[ScalarT: np.generic]( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: _DTypeLike[ScalarT], + casting: _CastingKind | None = "same_kind", +) -> NDArray[ScalarT]: ... @overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = 0, + out: None = None, *, - dtype: _DTypeLike[_ScalarT], - casting: _CastingKind | None = ... -) -> NDArray[_ScalarT]: ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> NDArray[Incomplete]: ... @overload -def concatenate( # type: ignore[misc] +def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = 0, *, + out: OutT, dtype: DTypeLike | None = None, - casting: _CastingKind | None = ... -) -> NDArray[Any]: ... + casting: _CastingKind | None = "same_kind", +) -> OutT: ... @overload -def concatenate( +def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + axis: SupportsIndex | None, + out: OutT, *, - dtype: DTypeLike = ..., - casting: _CastingKind | None = ... -) -> _ArrayT: ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> OutT: ... -def inner( - a: ArrayLike, - b: ArrayLike, - /, -) -> Any: ... +def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... @overload -def where( - condition: ArrayLike, - /, -) -> tuple[NDArray[intp], ...]: ... +def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... 
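The tightened signatures above make `ravel_multi_index` and `unravel_index` exact inverses for in-bounds indices under the same `order`; for example:

    import numpy as np

    shape = (3, 4)
    flat = np.ravel_multi_index((2, 1), shape)  # 2 * 4 + 1 under order="C"
    print(flat)                                 # 9
    print(np.unravel_index(flat, shape))        # indices (2, 1)

    # mode="wrap" (one of the _ModeKind options) folds out-of-bounds
    # indices instead of raising: row 5 % 3 == 2
    print(np.ravel_multi_index((5, 1), shape, mode="wrap"))  # 9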
@overload -def where( - condition: ArrayLike, - x: ArrayLike, - y: ArrayLike, - /, -) -> NDArray[Any]: ... +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> NDArray[Incomplete]: ... -def lexsort( - keys: ArrayLike, - axis: SupportsIndex | None = ..., -) -> Any: ... +def lexsort(keys: ArrayLike, axis: SupportsIndex = -1) -> NDArray[intp]: ... -def can_cast( - from_: ArrayLike | DTypeLike, - to: DTypeLike, - casting: _CastingKind | None = ..., -) -> bool: ... +def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind = "safe") -> bool: ... def min_scalar_type(a: ArrayLike, /) -> dtype: ... - -def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike) -> dtype: ... +def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... +def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... +def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, out: OutT) -> OutT: ... @overload -def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... # type: ignore[misc] +def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... @overload -def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... # type: ignore[misc] +def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... @overload -def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... # type: ignore[misc] +def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... @overload -def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... # type: ignore[misc] +def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... @overload -def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... # type: ignore[misc] +def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... @overload def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ... @overload -def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ... +def vdot(a: _ArrayLikeObject_co, b: object, /) -> Any: ... @overload -def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ... +def vdot(a: object, b: _ArrayLikeObject_co, /) -> Any: ... -def bincount( - x: ArrayLike, - /, - weights: ArrayLike | None = ..., - minlength: SupportsIndex = ..., -) -> NDArray[intp]: ... +def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> NDArray[intp]: ... -def copyto( - dst: NDArray[Any], - src: ArrayLike, - casting: _CastingKind | None = ..., - where: _ArrayLikeBool_co | None = ..., -) -> None: ... +def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... +def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -def putmask( - a: NDArray[Any], - /, - mask: _ArrayLikeBool_co, - values: ArrayLike, -) -> None: ... +type _BitOrder = L["big", "little"] -def packbits( - a: _ArrayLikeInt_co, - /, - axis: SupportsIndex | None = ..., - bitorder: L["big", "little"] = ..., -) -> NDArray[uint8]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> ndarray[tuple[int], dtype[uint8]]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: SupportsIndex, bitorder: _BitOrder = "big") -> NDArray[uint8]: ... 
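Splitting the `axis=None` and integer-`axis` cases into separate overloads (here, and for `unpackbits` below) pins down the flattened 1-D result shape; a round-trip sketch:

    import numpy as np

    bits = np.array([[1, 0, 1], [0, 1, 0]], dtype=np.uint8)

    packed = np.packbits(bits)             # axis=None: flatten, pad to 8 bits
    print(packed)                          # [168] == 0b10101000
    print(np.unpackbits(packed, count=6))  # [1 0 1 0 1 0]

    packed_rows = np.packbits(bits, axis=1)  # keeps the 2-D structure
    print(packed_rows.ravel())               # [160 64] == 0b10100000, 0b01000000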
+@overload +def unpackbits( + a: _ArrayLike[uint8], + /, + axis: None = None, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", +) -> ndarray[tuple[int], dtype[uint8]]: ... +@overload def unpackbits( a: _ArrayLike[uint8], /, - axis: SupportsIndex | None = ..., - count: SupportsIndex | None = ..., - bitorder: L["big", "little"] = ..., + axis: SupportsIndex, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", ) -> NDArray[uint8]: ... -def shares_memory( - a: object, - b: object, - /, - max_work: int | None = ..., -) -> bool: ... +type _MaxWork = L[-1, 0] -def may_share_memory( - a: object, - b: object, - /, - max_work: int | None = ..., -) -> bool: ... +# any two python objects will be accepted, not just `ndarray`s +def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... +def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... @overload -def asarray( - a: _ArrayLike[_ScalarT], - dtype: None = ..., +def asarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def asarray( +def asarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asarray( a: Any, @@ -708,35 +686,35 @@ def asarray( ) -> NDArray[Any]: ... @overload -def asanyarray( - a: _ArrayT, # Preserve subclass-information - dtype: None = ..., +def asanyarray[ArrayT: np.ndarray]( + a: ArrayT, # Preserve subclass-information + dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def asanyarray( - a: _ArrayLike[_ScalarT], - dtype: None = ..., +def asanyarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def asanyarray( +def asanyarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asanyarray( a: Any, @@ -749,19 +727,19 @@ def asanyarray( ) -> NDArray[Any]: ... @overload -def ascontiguousarray( - a: _ArrayLike[_ScalarT], - dtype: None = ..., +def ascontiguousarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def ascontiguousarray( +def ascontiguousarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def ascontiguousarray( a: Any, @@ -771,19 +749,19 @@ def ascontiguousarray( ) -> NDArray[Any]: ... 
@overload -def asfortranarray( - a: _ArrayLike[_ScalarT], - dtype: None = ..., +def asfortranarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def asfortranarray( +def asfortranarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asfortranarray( a: Any, @@ -798,21 +776,21 @@ def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... @overload def fromstring( string: str | bytes, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., *, sep: str, like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload -def fromstring( +def fromstring[ScalarT: np.generic]( string: str | bytes, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., *, sep: str, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromstring( string: str | bytes, @@ -824,69 +802,69 @@ def fromstring( ) -> NDArray[Any]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[[Any], _ReturnType], /, +def frompyfunc[ReturnT]( + func: Callable[[Any], ReturnT], /, nin: L[1], nout: L[1], *, - identity: None = ..., -) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... + identity: None = None, +) -> _PyFunc_Nin1_Nout1[ReturnT, None]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[[Any], _ReturnType], /, +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any], ReturnT], /, nin: L[1], nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... + identity: IdentityT, +) -> _PyFunc_Nin1_Nout1[ReturnT, IdentityT]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[[Any, Any], _ReturnType], /, +def frompyfunc[ReturnT]( + func: Callable[[Any, Any], ReturnT], /, nin: L[2], nout: L[1], *, - identity: None = ..., -) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... + identity: None = None, +) -> _PyFunc_Nin2_Nout1[ReturnT, None]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[[Any, Any], _ReturnType], /, +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any, Any], ReturnT], /, nin: L[2], nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... + identity: IdentityT, +) -> _PyFunc_Nin2_Nout1[ReturnT, IdentityT]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[..., _ReturnType], /, - nin: _Nin, +def frompyfunc[ReturnT, NInT: int]( + func: Callable[..., ReturnT], /, + nin: NInT, nout: L[1], *, - identity: None = ..., -) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... + identity: None = None, +) -> _PyFunc_Nin3P_Nout1[ReturnT, None, NInT]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] - func: Callable[..., _ReturnType], /, - nin: _Nin, +def frompyfunc[ReturnT, NInT: int, IdentityT]( + func: Callable[..., ReturnT], /, + nin: NInT, nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ... + identity: IdentityT, +) -> _PyFunc_Nin3P_Nout1[ReturnT, IdentityT, NInT]: ... 
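How the `nin`/`nout` literals in these overloads show up at runtime: they fix the ufunc's arity, and `identity` is what `ufunc.reduce` starts from. A sketch; the lambda is arbitrary:

    import numpy as np

    # nin=2, nout=1, with an identity so reduce() works on empty input
    concat = np.frompyfunc(lambda x, y: x + y, 2, 1, identity="")

    words = np.array(["a", "b", "c"], dtype=object)
    print(concat(words, words))  # ['aa' 'bb' 'cc'] (object dtype)
    print(concat.reduce(words))  # 'abc'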
@overload -def frompyfunc( - func: Callable[..., _2PTuple[_ReturnType]], /, - nin: _Nin, - nout: _Nout, +def frompyfunc[ReturnT, NInT: int, NOutT: int]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, *, - identity: None = ..., -) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... + identity: None = None, +) -> _PyFunc_Nin1P_Nout2P[ReturnT, None, NInT, NOutT]: ... @overload -def frompyfunc( - func: Callable[..., _2PTuple[_ReturnType]], /, - nin: _Nin, - nout: _Nout, +def frompyfunc[ReturnT, NInT: int, NOutT: int, IdentityT]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, *, - identity: _IDType, -) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ... + identity: IdentityT, +) -> _PyFunc_Nin1P_Nout2P[ReturnT, IdentityT, NInT, NOutT]: ... @overload def frompyfunc( func: Callable[..., Any], /, @@ -899,7 +877,7 @@ def frompyfunc( @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., @@ -907,15 +885,15 @@ def fromfile( like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload -def fromfile( +def fromfile[ScalarT: np.generic]( file: StrOrBytesPath | _SupportsFileMethods, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, @@ -928,17 +906,17 @@ def fromfile( ) -> NDArray[Any]: ... @overload -def fromiter( +def fromiter[ScalarT: np.generic]( iter: Iterable[Any], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromiter( iter: Iterable[Any], - dtype: DTypeLike, + dtype: DTypeLike | None, count: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., @@ -946,25 +924,25 @@ def fromiter( @overload def frombuffer( - buffer: _SupportsBuffer, - dtype: None = ..., + buffer: Buffer, + dtype: None = None, count: SupportsIndex = ..., offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload -def frombuffer( - buffer: _SupportsBuffer, - dtype: _DTypeLike[_ScalarT], +def frombuffer[ScalarT: np.generic]( + buffer: Buffer, + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def frombuffer( - buffer: _SupportsBuffer, + buffer: Buffer, dtype: DTypeLike | None = ..., count: SupportsIndex = ..., offset: SupportsIndex = ..., @@ -972,242 +950,271 @@ def frombuffer( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... -@overload -def arange( # type: ignore[misc] - stop: _IntLike_co, - /, *, - dtype: None = ..., - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[signedinteger]: ... -@overload -def arange( # type: ignore[misc] - start: _IntLike_co, - stop: _IntLike_co, - step: _IntLike_co = ..., - dtype: None = ..., - *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[signedinteger]: ... 
-@overload -def arange( # type: ignore[misc] - stop: _FloatLike_co, - /, *, - dtype: None = ..., - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[floating]: ... -@overload -def arange( # type: ignore[misc] - start: _FloatLike_co, - stop: _FloatLike_co, - step: _FloatLike_co = ..., - dtype: None = ..., +# keep in sync with ma.core.arange +# NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads +@overload # dtype= +def arange[ScalarT: _ArangeScalar]( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[floating]: ... -@overload + dtype: _DTypeLike[ScalarT], + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # (int-like, int-like?, int-like?) def arange( - stop: _TD64Like_co, - /, *, - dtype: None = ..., - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[timedelta64]: ... -@overload + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co | None = 1, + *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_]: ... +@overload # (float, float-like?, float-like?) def arange( - start: _TD64Like_co, - stop: _TD64Like_co, - step: _TD64Like_co = ..., - dtype: None = ..., + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co | None = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[timedelta64]: ... -@overload -def arange( # both start and stop must always be specified for datetime64 - start: datetime64, - stop: datetime64, - step: datetime64 = ..., - dtype: None = ..., + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) +def arange( + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co | None = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[datetime64]: ... -@overload + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) def arange( - stop: Any, - /, *, - dtype: _DTypeLike[_ScalarT], - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[_ScalarT]: ... -@overload + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: _DTypeLike[_ScalarT] = ..., + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co | None = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[_ScalarT]: ... 
-@overload + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) def arange( - stop: Any, /, + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co | None = 1, *, - dtype: DTypeLike | None = ..., - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[Any]: ... -@overload + dtype: _DTypeLike[np.datetime64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # dtype= def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: DTypeLike | None = ..., + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[Any]: ... + dtype: DTypeLike | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... -def datetime_data( - dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, -) -> tuple[str, int]: ... - -# The datetime functions perform unsafe casts to `datetime64[D]`, -# so a lot of different argument types are allowed here +# +def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... @overload -def busday_count( # type: ignore[misc] +def busday_count( begindates: _ScalarLike_co | dt.date, enddates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> int_: ... @overload -def busday_count( # type: ignore[misc] - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: None = ..., +def busday_count( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[int_]: ... @overload -def busday_count( - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., -) -> _ArrayT: ... +def busday_count[OutT: np.ndarray]( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + *, + out: OutT, +) -> OutT: ... +@overload +def busday_count[OutT: np.ndarray]( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates, + busdaycal: busdaycalendar | None, + out: OutT, +) -> OutT: ... # `roll="raise"` is (more or less?) 
equivalent to `casting="safe"` @overload -def busday_offset( # type: ignore[misc] +def busday_offset( dates: datetime64 | dt.date, offsets: _TD64Like_co | dt.timedelta, - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: None = ..., + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> datetime64: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: None = ..., +def busday_offset( + dates: _ArrayLike[datetime64] | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., -) -> _ArrayT: ... -@overload -def busday_offset( # type: ignore[misc] +def busday_offset[OutT: np.ndarray]( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: OutT, +) -> OutT: ... +@overload +def busday_offset[OutT: np.ndarray]( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"], + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: OutT, +) -> OutT: ... +@overload +def busday_offset( dates: _ScalarLike_co | dt.date, offsets: _ScalarLike_co | dt.timedelta, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> datetime64: ... @overload -def busday_offset( # type: ignore[misc] - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], +def busday_offset( + dates: ArrayLike | _NestedSequence[dt.date], + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[datetime64]: ... 
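The overload split above (literal `roll="raise"` versus the `_RollKind` values) mirrors the runtime contract: with the default, offsetting from an invalid day raises, while an explicit roll first snaps the date to a valid day. The dates below are taken from the upstream docstring:

    import numpy as np

    # 2011-03-20 is a Sunday, so the default roll="raise" refuses it
    try:
        np.busday_offset('2011-03-20', 0)
    except ValueError as exc:
        print(exc)

    # roll='forward' snaps to Monday before applying the offset
    print(np.busday_offset('2011-03-20', 0, roll='forward'))  # 2011-03-21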
@overload -def busday_offset( - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], +def busday_offset[OutT: np.ndarray]( + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., -) -> _ArrayT: ... + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: OutT, +) -> OutT: ... +@overload +def busday_offset[OutT: np.ndarray]( + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, + roll: _RollKind, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: OutT, +) -> OutT: ... @overload -def is_busday( # type: ignore[misc] +def is_busday( dates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> np.bool: ... @overload -def is_busday( # type: ignore[misc] +def is_busday( dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[np.bool]: ... @overload -def is_busday( - dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., -) -> _ArrayT: ... +def is_busday[OutT: np.ndarray]( + dates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: OutT, +) -> OutT: ... +@overload +def is_busday[OutT: np.ndarray]( + dates: ArrayLike | _ToDates, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: OutT, +) -> OutT: ... + +type _TimezoneContext = L["naive", "UTC", "local"] | dt.tzinfo @overload -def datetime_as_string( # type: ignore[misc] +def datetime_as_string( arr: datetime64 | dt.date, - unit: L["auto"] | _UnitKind | None = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> str_: ... @overload def datetime_as_string( arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], - unit: L["auto"] | _UnitKind | None = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> NDArray[str_]: ... @overload @@ -1227,7 +1234,7 @@ def compare_chararrays( def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... 
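Matching the pytz-to-zoneinfo swap in the docstring earlier, the `_TimezoneContext` alias above admits any `dt.tzinfo`, so the stdlib is enough:

    import numpy as np
    from zoneinfo import ZoneInfo  # stdlib since Python 3.9

    d = np.arange('2002-10-27T04:30', 4 * 60, 60, dtype='M8[m]')
    print(np.datetime_as_string(d, timezone='UTC'))
    print(np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern')))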
-_GetItemKeys: TypeAlias = L[ +type _GetItemKeys = L[ "C", "CONTIGUOUS", "C_CONTIGUOUS", "F", "FORTRAN", "F_CONTIGUOUS", "W", "WRITEABLE", @@ -1240,7 +1247,7 @@ _GetItemKeys: TypeAlias = L[ "FNC", "FORC", ] -_SetItemKeys: TypeAlias = L[ +type _SetItemKeys = L[ "A", "ALIGNED", "W", "WRITEABLE", "X", "WRITEBACKIFCOPY", @@ -1284,7 +1291,7 @@ def nested_iters( axes: Sequence[Sequence[SupportsIndex]], flags: Sequence[_NDIterFlagsKind] | None = ..., op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., buffersize: SupportsIndex = ..., diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 964447fa0d8a..d4e1685501d7 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -10,8 +10,7 @@ import numpy as np from numpy.exceptions import AxisError -from . import multiarray, numerictypes, overrides, shape_base, umath -from . import numerictypes as nt +from . import multiarray, numerictypes, numerictypes as nt, overrides, shape_base, umath from ._ufunc_config import errstate from .multiarray import ( # noqa: F401 ALLOW_THREADS, @@ -152,7 +151,7 @@ def zeros_like( array([[0, 0, 0], [0, 0, 0]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.zeros_like(y) @@ -212,7 +211,7 @@ def ones(shape, dtype=None, order='C', *, device=None, like=None): >>> np.ones(5) array([1., 1., 1., 1., 1.]) - >>> np.ones((5,), dtype=int) + >>> np.ones((5,), dtype=np.int_) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) @@ -301,7 +300,7 @@ def ones_like( array([[1, 1, 1], [1, 1, 1]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.ones_like(y) @@ -449,21 +448,21 @@ def full_like( Examples -------- >>> import numpy as np - >>> x = np.arange(6, dtype=int) + >>> x = np.arange(6, dtype=np.int_) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1]) >>> np.full_like(x, 0.1) array([0, 0, 0, 0, 0, 0]) - >>> np.full_like(x, 0.1, dtype=np.double) + >>> np.full_like(x, 0.1, dtype=np.float64) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> np.full_like(x, np.nan, dtype=np.double) + >>> np.full_like(x, np.nan, dtype=np.float64) array([nan, nan, nan, nan, nan, nan]) - >>> y = np.arange(6, dtype=np.double) + >>> y = np.arange(6, dtype=np.float64) >>> np.full_like(y, 0.1) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> y = np.zeros([2, 2, 3], dtype=int) + >>> y = np.zeros([2, 2, 3], dtype=np.int_) >>> np.full_like(y, [0, 0, 255]) array([[[ 0, 0, 255], [ 0, 0, 255]], @@ -524,11 +523,11 @@ def count_nonzero(a, axis=None, *, keepdims=False): -------- >>> import numpy as np >>> np.count_nonzero(np.eye(4)) - 4 + np.int64(4) >>> a = np.array([[0, 1, 7, 0], ... 
[3, 0, 2, 19]]) >>> np.count_nonzero(a) - 5 + np.int64(5) >>> np.count_nonzero(a, axis=0) array([1, 1, 2, 1]) >>> np.count_nonzero(a, axis=1) @@ -894,12 +893,12 @@ def convolve(a, v, mode='full'): """ a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1) - if (len(v) > len(a)): - a, v = v, a if len(a) == 0: raise ValueError('a cannot be empty') if len(v) == 0: raise ValueError('v cannot be empty') + if len(v) > len(a): + a, v = v, a return multiarray.correlate(a, v[::-1], mode) @@ -983,7 +982,7 @@ def outer(a, b, out=None): An example using a "vector" of letters: - >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) >>> np.outer(x, [1, 2, 3]) array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], @@ -1107,10 +1106,9 @@ def tensordot(a, b, axes=2): An extended example taking advantage of the overloading of + and \\*: - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) - >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) + >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=np.object_) + >>> A = A.reshape((2, 2)) >>> a; A array([[[1, 2], [3, 4]], @@ -1924,20 +1922,20 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): Examples -------- >>> import numpy as np - >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) + >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=np.float64) array([[0., 0.], [1., 1.]]) - >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) + >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=np.float64) array([[0., 1.], [0., 1.]]) - >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=np.int_) array([[ True, False, False], [False, True, False], [False, False, True]]) - >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=np.int_) array([[0, 1, 2], [1, 2, 3], [2, 3, 4]]) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 919fe1917197..ddf0bfa31977 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,18 +1,16 @@ -from collections.abc import Callable, Sequence +from _typeshed import Incomplete +from builtins import bool as py_bool +from collections.abc import Callable, Iterable, Sequence from typing import ( Any, Final, - Never, - NoReturn, + Literal as L, SupportsAbs, SupportsIndex, - TypeAlias, TypeGuard, TypeVar, - Unpack, overload, ) -from typing import Literal as L import numpy as np from numpy import ( @@ -20,32 +18,12 @@ from numpy import ( True_, _OrderCF, _OrderKACF, - # re-exports bitwise_not, - broadcast, - complexfloating, - dtype, - flatiter, - float64, - floating, - from_dlpack, - # other - generic, inf, - int_, - intp, little_endian, - matmul, nan, - ndarray, - nditer, newaxis, - object_, - signedinteger, - timedelta64, ufunc, - unsignedinteger, - vecdot, ) from numpy._typing import ( ArrayLike, @@ -56,50 +34,129 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _ArrayLikeObject_co, + _ArrayLikeNumber_co, _ArrayLikeTD64_co, - _ArrayLikeUInt_co, + _Complex128Codes, _DTypeLike, + _DTypeLikeBool, + _Float64Codes, + _IntPCodes, _NestedSequence, + _NumberLike_co, _ScalarLike_co, _Shape, _ShapeLike, + _SupportsArray, _SupportsArrayFunc, _SupportsDType, ) -from .fromnumeric import all as all -from .fromnumeric import any as any -from .fromnumeric import argpartition as argpartition -from .fromnumeric import 
matrix_transpose as matrix_transpose -from .fromnumeric import mean as mean +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) from .multiarray import ( - # other + ALLOW_THREADS as ALLOW_THREADS, + BUFSIZE as BUFSIZE, + CLIP as CLIP, + MAXDIMS as MAXDIMS, + MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT as MAY_SHARE_EXACT, + RAISE as RAISE, + WRAP as WRAP, _Array, _ConstructorEmpty, - _KwargsEmpty, - # re-exports arange, array, asanyarray, asarray, ascontiguousarray, asfortranarray, + broadcast, can_cast, concatenate, copyto, dot, + dtype, empty, empty_like, + flatiter, + from_dlpack, frombuffer, fromfile, fromiter, fromstring, inner, lexsort, + matmul, may_share_memory, min_scalar_type, + ndarray, + nditer, nested_iters, + normalize_axis_index as normalize_axis_index, promote_types, putmask, result_type, @@ -108,327 +165,759 @@ from .multiarray import ( where, zeros, ) +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, +) __all__ = [ - "newaxis", - "ndarray", - "flatiter", - 
"nditer", - "nested_iters", - "ufunc", + "False_", + "ScalarType", + "True_", + "absolute", + "add", + "all", + "allclose", + "amax", + "amin", + "any", "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", "array", - "asarray", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", "asanyarray", + "asarray", "ascontiguousarray", "asfortranarray", - "zeros", - "count_nonzero", - "empty", - "broadcast", - "dtype", - "fromstring", - "fromfile", - "frombuffer", - "from_dlpack", - "where", - "argwhere", - "copyto", - "concatenate", - "lexsort", "astype", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_not", + "bitwise_or", + "bitwise_xor", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", "can_cast", - "promote_types", - "min_scalar_type", - "result_type", - "isfortran", - "empty_like", - "zeros_like", - "ones_like", - "correlate", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concatenate", + "conj", + "conjugate", "convolve", - "inner", - "dot", - "outer", - "vdot", - "roll", - "rollaxis", - "moveaxis", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", "cross", - "tensordot", - "little_endian", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", "fromiter", - "array_equal", - "array_equiv", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hypot", + "identity", "indices", - "fromfunction", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", "isscalar", - "binary_repr", - "base_repr", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", "ones", - "identity", - "allclose", + "ones_like", + "outer", + "partition", + "pi", + "positive", + "power", + 
"printoptions", + "prod", + "promote_types", + "ptp", + "put", "putmask", - "flatnonzero", - "inf", - "nan", - "False_", - "True_", - "bitwise_not", - "full", - "full_like", - "matmul", - "vecdot", + "rad2deg", + "radians", + "ravel", + "reciprocal", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", "shares_memory", - "may_share_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "where", + "zeros", + "zeros_like", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -_AnyShapeT = TypeVar( - "_AnyShapeT", - tuple[()], - tuple[int], - tuple[int, int], - tuple[int, int, int], - tuple[int, int, int, int], - tuple[int, ...], +_AnyNumericScalarT = TypeVar( + "_AnyNumericScalarT", + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, + np.object_, ) -_CorrelateMode: TypeAlias = L["valid", "same", "full"] +type _CorrelateMode = L["valid", "same", "full"] -@overload -def zeros_like( - a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _Array4D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int, int], np.dtype[ScalarT]] + +type _Int_co = np.integer | np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool +type _TD64_co = np.timedelta64 | _Int_co + +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +type _ArrayLike1DTD64_co = _ArrayLike1D[_TD64_co] +type _ArrayLike1DObject_co = _ArrayLike1D[np.object_] + +type _DTypeLikeInt = type[int] | _IntPCodes +type _DTypeLikeFloat64 = type[float] | _Float64Codes +type _DTypeLikeComplex128 = type[complex] | _Complex128Codes + +### + +# keep in sync with `ones_like` +@overload +def zeros_like[ArrayT: np.ndarray]( + a: ArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., -) -> _ArrayT: ... 
-@overload -def zeros_like( - a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + device: L["cpu"] | None = None, +) -> ArrayT: ... +@overload +def zeros_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[_ScalarT]: ... -@overload -def zeros_like( - a: Any, - dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... +@overload +def zeros_like[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[_ScalarT]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... @overload def zeros_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... ones: Final[_ConstructorEmpty] -@overload -def ones_like( - a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., +# keep in sync with `zeros_like` +@overload +def ones_like[ArrayT: np.ndarray]( + a: ArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., -) -> _ArrayT: ... -@overload -def ones_like( - a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + device: L["cpu"] | None = None, +) -> ArrayT: ... +@overload +def ones_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[_ScalarT]: ... -@overload -def ones_like( - a: Any, - dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... +@overload +def ones_like[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[_ScalarT]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... @overload def ones_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview # 1-D shape @overload -def full( +def full[ScalarT: np.generic]( shape: SupportsIndex, - fill_value: _ScalarT, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[int], _ScalarT]: ... 
+ fill_value: ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], ScalarT]: ... @overload -def full( +def full[DTypeT: np.dtype]( shape: SupportsIndex, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[tuple[int], _DTypeT]: ... + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[tuple[int], DTypeT]: ... @overload -def full( +def full[ScalarT: np.generic]( shape: SupportsIndex, fill_value: Any, - dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[int], _ScalarT]: ... + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], ScalarT]: ... @overload def full( shape: SupportsIndex, fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[tuple[int], Any]: ... # known shape @overload -def full( - shape: _AnyShapeT, - fill_value: _ScalarT, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_AnyShapeT, _ScalarT]: ... +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + fill_value: ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[_AnyShapeT, _DTypeT]: ... + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, fill_value: Any, - dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_AnyShapeT, _ScalarT]: ... + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape]( + shape: ShapeT, fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_AnyShapeT, Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Any]: ... # unknown shape @overload -def full( +def full[ScalarT: np.generic]( shape: _ShapeLike, - fill_value: _ScalarT, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> NDArray[_ScalarT]: ... + fill_value: ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... 
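To make the shape-typed `full` overloads concrete: with these stubs a literal tuple shape binds `ShapeT` exactly, and a NumPy scalar `fill_value` fixes the element type. A minimal doctest-style sketch (illustrative only; the types in the comments assume a checker that consumes these stubs):

>>> import numpy as np
>>> np.full((2, 3), np.float64(0.5)).shape  # ShapeT binds to tuple[int, int]
(2, 3)
>>> np.full(4, 0, dtype=np.int8).dtype      # dtype=type[ScalarT] selects the 1-D int8 overload
dtype('int8')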
@overload -def full( +def full[DTypeT: np.dtype]( shape: _ShapeLike, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[Any, _DTypeT]: ... + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[Any, DTypeT]: ... @overload -def full( +def full[ScalarT: np.generic]( shape: _ShapeLike, fill_value: Any, - dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> NDArray[_ScalarT]: ... + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... @overload def full( shape: _ShapeLike, fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload -def full_like( - a: _ArrayT, - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., +def full_like[ArrayT: np.ndarray]( + a: ArrayT, + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., -) -> _ArrayT: ... -@overload -def full_like( - a: _ArrayLike[_ScalarT], - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + device: L["cpu"] | None = None, +) -> ArrayT: ... +@overload +def full_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[_ScalarT]: ... -@overload -def full_like( - a: Any, - fill_value: Any, - dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... +@overload +def full_like[ScalarT: np.generic]( + a: object, + fill_value: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[_ScalarT]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... @overload def full_like( - a: Any, - fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + fill_value: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # @@ -441,442 +930,327 @@ def count_nonzero( a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] ) -> NDArray[np.intp]: ... @overload -def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... # -def isfortran(a: NDArray[Any] | generic) -> bool: ... - -def argwhere(a: ArrayLike) -> NDArray[intp]: ... +def isfortran(a: ndarray | generic) -> py_bool: ... -def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... 
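For reference, the runtime behaviour that the tightened `argwhere`/`flatnonzero` annotations below encode: `argwhere` always returns a 2-D index array (one row per nonzero element) and `flatnonzero` a 1-D array of flat indices. A doctest-style sketch, illustrative only:

>>> import numpy as np
>>> np.argwhere(np.eye(2)).shape  # _Array2D[np.intp]
(2, 2)
>>> np.flatnonzero(np.eye(2))     # _Array1D[np.intp]
array([0, 3])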
+#
+def argwhere(a: ArrayLike) -> _Array2D[np.intp]: ...
+def flatnonzero(a: ArrayLike) -> _Array1D[np.intp]: ...

+# keep in sync with `convolve`
 @overload
 def correlate(
-    a: _ArrayLike[Never],
-    v: _ArrayLike[Never],
-    mode: _CorrelateMode = ...,
-) -> NDArray[Any]: ...
-@overload
-def correlate(
-    a: _ArrayLikeBool_co,
-    v: _ArrayLikeBool_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[np.bool]: ...
-@overload
-def correlate(
-    a: _ArrayLikeUInt_co,
-    v: _ArrayLikeUInt_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[unsignedinteger]: ...
+    a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid"
+) -> _Array1D[_AnyNumericScalarT]: ...
 @overload
-def correlate(
-    a: _ArrayLikeInt_co,
-    v: _ArrayLikeInt_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[signedinteger]: ...
+def correlate(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ...
 @overload
-def correlate(
-    a: _ArrayLikeFloat_co,
-    v: _ArrayLikeFloat_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[floating]: ...
+def correlate(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ...
 @overload
-def correlate(
-    a: _ArrayLikeComplex_co,
-    v: _ArrayLikeComplex_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[complexfloating]: ...
+def correlate(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ...
 @overload
 def correlate(
-    a: _ArrayLikeTD64_co,
-    v: _ArrayLikeTD64_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[timedelta64]: ...
+    a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid"
+) -> _Array1D[np.complex128 | Any]: ...
 @overload
 def correlate(
-    a: _ArrayLikeObject_co,
-    v: _ArrayLikeObject_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[object_]: ...
+    a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid"
+) -> _Array1D[np.timedelta64 | Any]: ...

+# keep in sync with `correlate` (note: `convolve` defaults to mode="full" at runtime)
 @overload
 def convolve(
-    a: _ArrayLike[Never],
-    v: _ArrayLike[Never],
-    mode: _CorrelateMode = ...,
-) -> NDArray[Any]: ...
+    a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "full"
+) -> _Array1D[_AnyNumericScalarT]: ...
 @overload
-def convolve(
-    a: _ArrayLikeBool_co,
-    v: _ArrayLikeBool_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[np.bool]: ...
+def convolve(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "full") -> _Array1D[np.bool]: ...
 @overload
-def convolve(
-    a: _ArrayLikeUInt_co,
-    v: _ArrayLikeUInt_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[unsignedinteger]: ...
+def convolve(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "full") -> _Array1D[np.int_ | Any]: ...
 @overload
-def convolve(
-    a: _ArrayLikeInt_co,
-    v: _ArrayLikeInt_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[signedinteger]: ...
-@overload
-def convolve(
-    a: _ArrayLikeFloat_co,
-    v: _ArrayLikeFloat_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[floating]: ...
+def convolve(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "full") -> _Array1D[np.float64 | Any]: ...
 @overload
 def convolve(
-    a: _ArrayLikeComplex_co,
-    v: _ArrayLikeComplex_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[complexfloating]: ...
-@overload
-def convolve(
-    a: _ArrayLikeTD64_co,
-    v: _ArrayLikeTD64_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[timedelta64]: ...
+    a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "full"
+) -> _Array1D[np.complex128 | Any]: ...
 @overload
 def convolve(
-    a: _ArrayLikeObject_co,
-    v: _ArrayLikeObject_co,
-    mode: _CorrelateMode = ...,
-) -> NDArray[object_]: ...
+    a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "full"
+) -> _Array1D[np.timedelta64 | Any]: ...

+# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload
 @overload
 def outer(
-    a: _ArrayLike[Never],
-    b: _ArrayLike[Never],
-    out: None = ...,
-) -> NDArray[Any]: ...
+    a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], out: None = None
+) -> _Array2D[_AnyNumericScalarT]: ...
 @overload
-def outer(
-    a: _ArrayLikeBool_co,
-    b: _ArrayLikeBool_co,
-    out: None = ...,
-) -> NDArray[np.bool]: ...
+def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, out: None = None) -> _Array2D[np.bool]: ...
 @overload
-def outer(
-    a: _ArrayLikeUInt_co,
-    b: _ArrayLikeUInt_co,
-    out: None = ...,
-) -> NDArray[unsignedinteger]: ...
+def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, out: None = None) -> _Array2D[np.int_ | Any]: ...
 @overload
-def outer(
-    a: _ArrayLikeInt_co,
-    b: _ArrayLikeInt_co,
-    out: None = ...,
-) -> NDArray[signedinteger]: ...
+def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, out: None = None) -> _Array2D[np.float64 | Any]: ...
 @overload
-def outer(
-    a: _ArrayLikeFloat_co,
-    b: _ArrayLikeFloat_co,
-    out: None = ...,
-) -> NDArray[floating]: ...
+def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = None) -> _Array2D[np.complex128 | Any]: ...
 @overload
-def outer(
-    a: _ArrayLikeComplex_co,
-    b: _ArrayLikeComplex_co,
-    out: None = ...,
-) -> NDArray[complexfloating]: ...
-@overload
-def outer(
-    a: _ArrayLikeTD64_co,
-    b: _ArrayLikeTD64_co,
-    out: None = ...,
-) -> NDArray[timedelta64]: ...
-@overload
-def outer(
-    a: _ArrayLikeObject_co,
-    b: _ArrayLikeObject_co,
-    out: None = ...,
-) -> NDArray[object_]: ...
+def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = None) -> _Array2D[np.timedelta64 | Any]: ...
 @overload
-def outer(
-    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
-    b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
-    out: _ArrayT,
-) -> _ArrayT: ...
+def outer[ArrayT: np.ndarray](a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: ArrayT) -> ArrayT: ...

+# keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`)
 @overload
 def tensordot(
-    a: _ArrayLike[Never],
-    b: _ArrayLike[Never],
-    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
-) -> NDArray[Any]: ...
-@overload
-def tensordot(
-    a: _ArrayLikeBool_co,
-    b: _ArrayLikeBool_co,
-    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
-) -> NDArray[np.bool]: ...
-@overload
-def tensordot(
-    a: _ArrayLikeUInt_co,
-    b: _ArrayLikeUInt_co,
-    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
-) -> NDArray[unsignedinteger]: ...
-@overload
-def tensordot(
-    a: _ArrayLikeInt_co,
-    b: _ArrayLikeInt_co,
-    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
-) -> NDArray[signedinteger]: ...
+    a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2
+) -> NDArray[_AnyNumericScalarT]: ...
 @overload
-def tensordot(
-    a: _ArrayLikeFloat_co,
-    b: _ArrayLikeFloat_co,
-    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
-) -> NDArray[floating]: ...
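A quick sanity check of the 1-D `correlate`/`convolve` overloads above: the `_AnyNumericScalarT` overload preserves the common element type, and the stub defaults mirror the runtime signatures (`mode="valid"` for `correlate`, `mode="full"` for `convolve`). Doctest-style sketch, illustrative only:

>>> import numpy as np
>>> a = np.array([1, 2], np.int16)
>>> np.convolve(a, a).dtype  # element type preserved; default mode="full"
dtype('int16')
>>> np.correlate(np.array([1.0, 2.0]), np.array([1.0]))  # default mode="valid"
array([1., 2.])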
+def tensordot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2) -> NDArray[np.bool]: ... @overload def tensordot( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[complexfloating]: ... + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.int_ | Any]: ... @overload def tensordot( - a: _ArrayLikeTD64_co, - b: _ArrayLikeTD64_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[timedelta64]: ... + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.float64 | Any]: ... @overload def tensordot( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[object_]: ... + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.complex128 | Any]: ... -@overload -def roll( - a: _ArrayLike[_ScalarT], - shift: _ShapeLike, - axis: _ShapeLike | None = ..., -) -> NDArray[_ScalarT]: ... -@overload -def roll( - a: ArrayLike, - shift: _ShapeLike, - axis: _ShapeLike | None = ..., -) -> NDArray[Any]: ... - -def rollaxis( - a: NDArray[_ScalarT], - axis: int, - start: int = ..., -) -> NDArray[_ScalarT]: ... - -def moveaxis( - a: NDArray[_ScalarT], - source: _ShapeLike, - destination: _ShapeLike, -) -> NDArray[_ScalarT]: ... - -@overload -def cross( - a: _ArrayLike[Never], - b: _ArrayLike[Never], - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., -) -> NDArray[Any]: ... -@overload -def cross( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., -) -> NoReturn: ... +# @overload def cross( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., -) -> NDArray[unsignedinteger]: ... + a: _ArrayLike[_AnyNumericScalarT], + b: _ArrayLike[_AnyNumericScalarT], + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[_AnyNumericScalarT]: ... @overload def cross( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., -) -> NDArray[signedinteger]: ... + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.int_ | Any]: ... @overload def cross( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., -) -> NDArray[floating]: ... + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.float64 | Any]: ... @overload def cross( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., -) -> NDArray[complexfloating]: ... -@overload -def cross( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., -) -> NDArray[object_]: ... + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.complex128 | Any]: ... +# @overload -def indices( - dimensions: Sequence[int], - dtype: type[int] = ..., - sparse: L[False] = ..., -) -> NDArray[int_]: ... 
-@overload -def indices( - dimensions: Sequence[int], - dtype: type[int], - sparse: L[True], -) -> tuple[NDArray[int_], ...]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: type[int] = ..., - *, - sparse: L[True], -) -> tuple[NDArray[int_], ...]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], - sparse: L[False] = ..., -) -> NDArray[_ScalarT]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], - sparse: L[True], -) -> tuple[NDArray[_ScalarT], ...]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: DTypeLike = ..., - sparse: L[False] = ..., -) -> NDArray[Any]: ... +def roll[ArrayT: np.ndarray](a: ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> ArrayT: ... @overload -def indices( - dimensions: Sequence[int], - dtype: DTypeLike, - sparse: L[True], -) -> tuple[NDArray[Any], ...]: ... +def roll[ScalarT: np.generic](a: _ArrayLike[ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload +def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... + +# +def rollaxis[ArrayT: np.ndarray](a: ArrayT, axis: int, start: int = 0) -> ArrayT: ... +def moveaxis[ArrayT: np.ndarray](a: ArrayT, source: _ShapeLike, destination: _ShapeLike) -> ArrayT: ... +def normalize_axis_tuple( + axis: int | Iterable[int], + ndim: int, + argname: str | None = None, + allow_duplicate: py_bool | None = False, +) -> tuple[int, ...]: ... + +# +@overload # 0d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[()], dtype: type[int] = int, sparse: L[False] = False) -> _Array1D[np.intp]: ... +@overload # 0d, dtype=, sparse=True +def indices(dimensions: tuple[()], dtype: DTypeLike | None = int, *, sparse: L[True]) -> tuple[()]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices[ScalarT: np.generic](dimensions: tuple[()], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array1D[ScalarT]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices(dimensions: tuple[()], dtype: DTypeLike, sparse: L[False] = False) -> _Array1D[Any]: ... +@overload # 1d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int], dtype: type[int] = int, sparse: L[False] = False) -> _Array2D[np.intp]: ... +@overload # 1d, dtype=int (default), sparse=True +def indices(dimensions: tuple[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[_Array1D[np.intp]]: ... +@overload # 1d, dtype=, sparse=False (default) +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array2D[ScalarT]: ... +@overload # 1d, dtype=, sparse=True +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[_Array1D[ScalarT]]: ... +@overload # 1d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[False] = False) -> _Array2D[Any]: ... +@overload # 1d, dtype=, sparse=True +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array1D[Any]]: ... +@overload # 2d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: type[int] = int, sparse: L[False] = False) -> _Array3D[np.intp]: ... +@overload # 2d, dtype=int (default), sparse=True def indices( - dimensions: Sequence[int], - dtype: DTypeLike = ..., - *, - sparse: L[True], -) -> tuple[NDArray[Any], ...]: ... 
+ dimensions: tuple[int, int], dtype: type[int] = int, *, sparse: L[True] +) -> tuple[_Array2D[np.intp], _Array2D[np.intp]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices[ScalarT: np.generic](dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array3D[ScalarT]: ... +@overload # 2d, dtype=, sparse=True +def indices[ScalarT: np.generic]( + dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[True] +) -> tuple[_Array2D[ScalarT], _Array2D[ScalarT]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[False] = False) -> _Array3D[Any]: ... +@overload # 2d, dtype=, sparse=True +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array2D[Any], _Array2D[Any]]: ... +@overload # ?d, dtype=int (default), sparse=False (default) +def indices(dimensions: Sequence[int], dtype: type[int] = int, sparse: L[False] = False) -> NDArray[np.intp]: ... +@overload # ?d, dtype=int (default), sparse=True +def indices(dimensions: Sequence[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[NDArray[np.intp], ...]: ... +@overload # ?d, dtype=, sparse=False (default) +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> NDArray[ScalarT]: ... +@overload # ?d, dtype=, sparse=True +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[NDArray[ScalarT], ...]: ... +@overload # ?d, dtype=, sparse=False (default) +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[False] = False) -> ndarray: ... +@overload # ?d, dtype=, sparse=True +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[True]) -> tuple[ndarray, ...]: ... -def fromfunction( - function: Callable[..., _T], +# +def fromfunction[ReturnT]( + function: Callable[..., ReturnT], shape: Sequence[int], *, - dtype: DTypeLike = ..., - like: _SupportsArrayFunc | None = ..., - **kwargs: Any, -) -> _T: ... + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, +) -> ReturnT: ... +# def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... -def binary_repr(num: SupportsIndex, width: int | None = ...) -> str: ... - -def base_repr( - number: SupportsAbs[float], - base: float = ..., - padding: SupportsIndex | None = ..., -) -> str: ... +# +def binary_repr(num: SupportsIndex, width: int | None = None) -> str: ... +def base_repr(number: SupportsAbs[float], base: float = 2, padding: SupportsIndex | None = 0) -> str: ... -@overload -def identity( - n: int, - dtype: None = ..., - *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[float64]: ... -@overload -def identity( - n: int, - dtype: _DTypeLike[_ScalarT], - *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... -@overload -def identity( - n: int, - dtype: DTypeLike | None = ..., - *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... +# +@overload # dtype: None (default) +def identity(n: int, dtype: None = None, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64]: ... +@overload # dtype: known scalar type +def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[ScalarT]: ... 
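The `fromfunction` stub above simply propagates the callable's return type, and the `base_repr` defaults match the runtime (`base=2`, `padding=0`). Doctest-style sketch, illustrative only:

>>> import numpy as np
>>> np.fromfunction(lambda i, j: i == j, (2, 2))  # ReturnT is whatever the callable returns
array([[ True, False],
       [False,  True]])
>>> np.base_repr(255, base=16)
'FF'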
+@overload # dtype: like bool
+def identity(n: int, dtype: _DTypeLikeBool, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.bool]: ...
+@overload # dtype: like int_
+def identity(n: int, dtype: _DTypeLikeInt, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.int_ | Any]: ...
+@overload # dtype: like float64
+def identity(n: int, dtype: _DTypeLikeFloat64, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64 | Any]: ...
+@overload # dtype: like complex128
+def identity(n: int, dtype: _DTypeLikeComplex128, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.complex128 | Any]: ...
+@overload # dtype: unknown
+def identity(n: int, dtype: DTypeLike, *, like: _SupportsArrayFunc | None = None) -> _Array2D[Incomplete]: ...

+#
 def allclose(
     a: ArrayLike,
     b: ArrayLike,
-    rtol: ArrayLike = ...,
-    atol: ArrayLike = ...,
-    equal_nan: bool = ...,
-) -> bool: ...
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> py_bool: ...

-@overload
+#
+@overload # scalar, scalar
 def isclose(
-    a: _ScalarLike_co,
-    b: _ScalarLike_co,
-    rtol: ArrayLike = ...,
-    atol: ArrayLike = ...,
-    equal_nan: bool = ...,
+    a: _NumberLike_co,
+    b: _NumberLike_co,
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
 ) -> np.bool: ...
-@overload
+@overload # known shape, same shape or scalar
+def isclose[ShapeT: _Shape](
+    a: np.ndarray[ShapeT],
+    b: np.ndarray[ShapeT] | _NumberLike_co,
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ...
+@overload # same shape or scalar, known shape
+def isclose[ShapeT: _Shape](
+    a: np.ndarray[ShapeT] | _NumberLike_co,
+    b: np.ndarray[ShapeT],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ...
+@overload # 1d sequence, <=1d array-like
+def isclose(
+    a: Sequence[_NumberLike_co],
+    b: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ...
+@overload # <=1d array-like, 1d sequence
+def isclose(
+    a: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]],
+    b: Sequence[_NumberLike_co],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ...
+@overload # 2d sequence, <=2d array-like
+def isclose(
+    a: Sequence[Sequence[_NumberLike_co]],
+    b: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[tuple[int, int], np.dtype[np.bool]]: ...
+@overload # <=2d array-like, 2d sequence
+def isclose(
+    a: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]],
+    b: Sequence[Sequence[_NumberLike_co]],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[tuple[int, int], np.dtype[np.bool]]: ...
+@overload # unknown shape, unknown shape
 def isclose(
     a: ArrayLike,
     b: ArrayLike,
-    rtol: ArrayLike = ...,
-    atol: ArrayLike = ...,
-    equal_nan: bool = ...,
-) -> NDArray[np.bool]: ...
-
-def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ...
+ rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> NDArray[np.bool] | Any: ... -def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... +# +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ... +def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... +# @overload -def astype( - x: ndarray[_ShapeT, dtype], - dtype: _DTypeLike[_ScalarT], +def astype[ShapeT: _Shape, ScalarT: np.generic]( + x: ndarray[ShapeT], + dtype: _DTypeLike[ScalarT], /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., -) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... + copy: py_bool = True, + device: L["cpu"] | None = None, +) -> ndarray[ShapeT, dtype[ScalarT]]: ... @overload -def astype( - x: ndarray[_ShapeT, dtype], - dtype: DTypeLike, +def astype[ShapeT: _Shape]( + x: ndarray[ShapeT], + dtype: DTypeLike | None, /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., -) -> ndarray[_ShapeT, dtype]: ... + copy: py_bool = True, + device: L["cpu"] | None = None, +) -> ndarray[ShapeT]: ... diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 135dc1b51d97..70570c8c0f39 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -220,7 +220,7 @@ def issctype(rep): Strings are also a scalar type: - >>> issctype(np.dtype('str')) + >>> issctype(np.dtype(np.str_)) True """ @@ -598,7 +598,7 @@ def _scalar_type_key(typ): ScalarType = [int, float, complex, bool, bytes, str, memoryview] -ScalarType += sorted(set(sctypeDict.values()), key=_scalar_type_key) +ScalarType += sorted(dict.fromkeys(sctypeDict.values()), key=_scalar_type_key) ScalarType = tuple(ScalarType) diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 753fe34800d5..46bb6a379861 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,6 +1,5 @@ -import builtins -from typing import Any, TypedDict, type_check_only -from typing import Literal as L +from builtins import bool as py_bool +from typing import Any, Final, Literal as L, TypedDict, type_check_only import numpy as np from numpy import ( @@ -13,6 +12,8 @@ from numpy import ( clongdouble, complex64, complex128, + complex192, + complex256, complexfloating, csingle, datetime64, @@ -22,6 +23,8 @@ from numpy import ( float16, float32, float64, + float96, + float128, floating, generic, half, @@ -59,9 +62,8 @@ from numpy import ( void, ) from numpy._typing import DTypeLike -from numpy._typing._extended_precision import complex192, complex256, float96, float128 -from ._type_aliases import sctypeDict # noqa: F401 +from ._type_aliases import sctypeDict as sctypeDict from .multiarray import ( busday_count, busday_offset, @@ -142,51 +144,53 @@ __all__ = [ @type_check_only class _TypeCodes(TypedDict): - Character: L['c'] - Integer: L['bhilqnp'] - UnsignedInteger: L['BHILQNP'] - Float: L['efdg'] - Complex: L['FDG'] - AllInteger: L['bBhHiIlLqQnNpP'] - AllFloat: L['efdgFDG'] - Datetime: L['Mm'] - All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] + Character: L["c"] + Integer: L["bhilqnp"] + UnsignedInteger: L["BHILQNP"] + Float: L["efdg"] + Complex: L["FDG"] + AllInteger: L["bBhHiIlLqQnNpP"] + AllFloat: L["efdgFDG"] + Datetime: L["Mm"] + All: L["?bhilqnpBHILQNPefdgFDGSUVOMm"] -def isdtype(dtype: dtype | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... +def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... +def issubdtype(arg1: DTypeLike | None, arg2: DTypeLike | None) -> py_bool: ... 
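Usage of the two predicates annotated above, for orientation (doctest-style, illustrative only):

>>> import numpy as np
>>> np.isdtype(np.float64, "real floating")  # array-API style kind strings
True
>>> np.issubdtype(np.float32, np.floating)   # classic scalar-hierarchy check
True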
-def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... - -typecodes: _TypeCodes -ScalarType: tuple[ - type[int], - type[float], - type[complex], - type[builtins.bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[csingle], - type[cdouble], - type[clongdouble], - type[half], - type[single], - type[double], - type[longdouble], - type[byte], - type[short], - type[intc], - type[long], - type[longlong], - type[timedelta64], - type[datetime64], - type[object_], - type[bytes_], - type[str_], - type[ubyte], - type[ushort], - type[uintc], - type[ulong], - type[ulonglong], - type[void], -] +typecodes: Final[_TypeCodes] = ... +ScalarType: Final[ + tuple[ + type[int], + type[float], + type[complex], + type[py_bool], + type[bytes], + type[str], + type[memoryview[Any]], + type[np.bool], + type[complex64], + type[complex128], + type[complex128 | complex192 | complex256], + type[float16], + type[float32], + type[float64], + type[float64 | float96 | float128], + type[int8], + type[int16], + type[int32], + type[int32 | int64], + type[int64], + type[datetime64], + type[timedelta64], + type[object_], + type[bytes_], + type[str_], + type[uint8], + type[uint16], + type[uint32], + type[uint32 | uint64], + type[uint64], + type[void], + ] +] = ... +typeDict: Final = sctypeDict diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index 6414710ae900..6d5e7750b09b 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -1,6 +1,7 @@ """Implementation of __array_function__ overrides from NEP-18.""" import collections import functools +import inspect from numpy._core._multiarray_umath import ( _ArrayFunctionDispatcher, @@ -156,11 +157,15 @@ def decorator(implementation): "argument and a keyword-only argument. " f"{implementation} does not seem to comply.") - if docs_from_dispatcher: - add_docstring(implementation, dispatcher.__doc__) + if docs_from_dispatcher and dispatcher.__doc__ is not None: + doc = inspect.cleandoc(dispatcher.__doc__) + add_docstring(implementation, doc) public_api = _ArrayFunctionDispatcher(dispatcher, implementation) - public_api = functools.wraps(implementation)(public_api) + functools.update_wrapper(public_api, implementation) + + if not verify and not getattr(implementation, "__text_signature__", None): + public_api.__signature__ = inspect.signature(dispatcher) if module is not None: public_api.__module__ = module diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 05453190efd4..627165e98d3d 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,11 +1,10 @@ from collections.abc import Callable, Iterable -from typing import Any, Final, NamedTuple, ParamSpec, TypeVar +from typing import Any, Final, NamedTuple -from numpy._typing import _SupportsArrayFunc +from numpy._utils import set_module as set_module -_T = TypeVar("_T") -_Tss = ParamSpec("_Tss") -_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) +type _FuncLike = type | Callable[..., object] +type _Dispatcher[**_Tss] = Callable[_Tss, Iterable[object]] ### @@ -18,31 +17,28 @@ class ArgSpec(NamedTuple): keywords: str | None defaults: tuple[Any, ...] -def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... -def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... +def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... +def finalize_array_function_like[FuncLikeT: _FuncLike](public_api: FuncLikeT) -> FuncLikeT: ... 
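To ground the `overrides` changes above: a dispatcher returns the arguments whose `__array_function__` may take over, and the decorator wraps the implementation with it. A minimal sketch using a hypothetical `my_where` (this decorator is private API; the example is for illustration only):

>>> from numpy._core.overrides import array_function_dispatch
>>> def _my_where_dispatcher(condition, x=None, y=None):
...     return (condition, x, y)  # the overridable arguments
>>> @array_function_dispatch(_my_where_dispatcher, module='numpy')
... def my_where(condition, x=None, y=None):
...     return x if condition else y  # toy implementation
>>> my_where.__module__
'numpy'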
# -def verify_matching_signatures( - implementation: Callable[_Tss, object], - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], -) -> None: ... +def verify_matching_signatures[**Tss](implementation: Callable[Tss, object], dispatcher: _Dispatcher[Tss]) -> None: ... # NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with # the original wrapped callable stored in the `._implementation` attribute. It checks # for any `__array_function__` of the values of specific arguments that the dispatcher # specifies. Since the dispatcher only returns an iterable of passed array-like args, # this overridable behaviour is impossible to annotate. -def array_function_dispatch( - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, +def array_function_dispatch[**Tss, FuncLikeT: _FuncLike]( + dispatcher: _Dispatcher[Tss] | None = None, module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = False, -) -> Callable[[_FuncT], _FuncT]: ... +) -> Callable[[FuncLikeT], FuncLikeT]: ... # -def array_function_from_dispatcher( - implementation: Callable[_Tss, _T], +def array_function_from_dispatcher[**Tss, T]( + implementation: Callable[Tss, T], module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = True, -) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... +) -> Callable[[_Dispatcher[Tss]], Callable[Tss, T]]: ... diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 39bcf4ba6294..9a6af16e3b23 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -8,8 +8,7 @@ from numpy._utils import set_module -from . import numeric as sb -from . import numerictypes as nt +from . import numeric as sb, numerictypes as nt from .arrayprint import _get_legacy_print_mode # All of the functions allow formats to be a dtype diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 93177b2d3f75..326a0fe6e476 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,22 +1,20 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false -from collections.abc import Iterable, Sequence +from _typeshed import Incomplete, StrOrBytesPath +from collections.abc import Buffer, Iterable, Sequence from typing import ( Any, ClassVar, Literal, Protocol, SupportsIndex, - TypeAlias, overload, type_check_only, ) - -from _typeshed import StrOrBytesPath from typing_extensions import TypeVar import numpy as np -from numpy import _ByteOrder, _OrderKACF, _SupportsBuffer +from numpy import _ByteOrder, _OrderKACF from numpy._typing import ( ArrayLike, DTypeLike, @@ -40,12 +38,11 @@ __all__ = [ "record", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +# Explicit covariant type variables are needed because mypy isn't very good at variance inference right now. _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_RecArray: TypeAlias = recarray[_AnyShape, np.dtype[_ScalarT]] +type _RecArray[_ScalarT: np.generic] = recarray[_AnyShape, np.dtype[_ScalarT]] @type_check_only class _SupportsReadInto(Protocol): @@ -56,29 +53,36 @@ class _SupportsReadInto(Protocol): ### # exported in `numpy.rec` -class record(np.void): - def __getattribute__(self, attr: str) -> Any: ... - def __setattr__(self, attr: str, val: ArrayLike) -> None: ... 
+class record(np.void): # type: ignore[misc] + __name__: ClassVar[Literal["record"]] = "record" + __module__: Literal["numpy"] = "numpy" + def pprint(self) -> str: ... + + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + + # + @overload # type: ignore[override] + def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... @overload - def __getitem__(self, key: str | SupportsIndex) -> Any: ... - @overload - def __getitem__(self, key: list[str]) -> record: ... + def __getitem__(self, key: list[str], /) -> record: ... # exported in `numpy.rec` class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): - __name__: ClassVar[Literal["record"]] = "record" - __module__: Literal["numpy"] = "numpy" + __name__: ClassVar[Literal["recarray"]] = "recarray" + __module__: Literal["numpy.rec"] = "numpy.rec" + @overload def __new__( subtype, shape: _ShapeLike, dtype: None = None, - buf: _SupportsBuffer | None = None, + buf: Buffer | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, byteorder: _ByteOrder | None = None, @@ -89,8 +93,8 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): def __new__( subtype, shape: _ShapeLike, - dtype: DTypeLike, - buf: _SupportsBuffer | None = None, + dtype: DTypeLike | None, + buf: Buffer | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, formats: None = None, @@ -99,16 +103,18 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): byteorder: None = None, aligned: Literal[False] = False, order: _OrderKACF = "C", - ) -> _RecArray[Any]: ... - def __array_finalize__(self, /, obj: object) -> None: ... + ) -> _RecArray[Incomplete]: ... + def __getattribute__(self, attr: str, /) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + def __array_finalize__(self, /, obj: object) -> None: ... + # @overload def field(self, /, attr: int | str, val: ArrayLike) -> None: ... @overload - def field(self, /, attr: int | str, val: None = None) -> Any: ... + def field(self, /, attr: int | str, val: None = None) -> Incomplete: ... # exported in `numpy.rec` class format_parser: @@ -116,7 +122,7 @@ class format_parser: def __init__( self, /, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None, titles: str | Sequence[str] | None, aligned: bool = False, @@ -141,7 +147,7 @@ def fromarrays( dtype: None = None, shape: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -165,7 +171,7 @@ def fromrecords( dtype: None = None, shape: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -175,8 +181,8 @@ def fromrecords( # exported in `numpy.rec` @overload def fromstring( - datastring: _SupportsBuffer, - dtype: DTypeLike, + datastring: Buffer, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, formats: None = None, @@ -187,12 +193,12 @@ def fromstring( ) -> _RecArray[record]: ... 
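As a reminder of what the `record`/`recarray` annotations above describe at runtime: fields are reachable by attribute both on the array and on individual records (doctest-style, illustrative only):

>>> import numpy as np
>>> r = np.rec.fromrecords([(1, 2.0, 'a')], names='x,y,z')
>>> r.x.tolist(), float(r[0].y)
([1], 2.0)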
@overload def fromstring( - datastring: _SupportsBuffer, + datastring: Buffer, dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -203,7 +209,7 @@ def fromstring( @overload def fromfile( fd: StrOrBytesPath | _SupportsReadInto, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, formats: None = None, @@ -219,7 +225,7 @@ def fromfile( shape: _ShapeLike | None = None, offset: int = 0, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -228,8 +234,8 @@ def fromfile( # exported in `numpy.rec` @overload -def array( - obj: _ScalarT | NDArray[_ScalarT], +def array[ScalarT: np.generic]( + obj: ScalarT | NDArray[ScalarT], dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, @@ -240,11 +246,11 @@ def array( aligned: bool = False, byteorder: None = None, copy: bool = True, -) -> _RecArray[_ScalarT]: ... +) -> _RecArray[ScalarT]: ... @overload def array( obj: ArrayLike, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -263,7 +269,7 @@ def array( offset: int = 0, strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -273,7 +279,7 @@ def array( @overload def array( obj: None, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -292,7 +298,7 @@ def array( shape: _ShapeLike, offset: int = 0, strides: tuple[int, ...] | None = None, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -302,7 +308,7 @@ def array( @overload def array( obj: _SupportsReadInto, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -321,7 +327,7 @@ def array( offset: int = 0, strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -330,4 +336,4 @@ def array( ) -> _RecArray[record]: ... # exported in `numpy.rec` -def find_duplicate(list: Iterable[_T]) -> list[_T]: ... +def find_duplicate[T](list: Iterable[T]) -> list[T]: ... diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index c2a0f0dae789..39de8739db0e 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -5,9 +5,7 @@ import itertools import operator -from . import fromnumeric as _from_nx -from . import numeric as _nx -from . import overrides +from . 
import fromnumeric as _from_nx, numeric as _nx, overrides from .multiarray import array, asanyarray, normalize_axis_index array_function_dispatch = functools.partial( diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index c2c9c961e55b..b41602ae8d47 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -1,7 +1,8 @@ from collections.abc import Sequence -from typing import Any, SupportsIndex, TypeVar, overload +from typing import Any, SupportsIndex, overload -from numpy import _CastingKind, generic +import numpy as np +from numpy import _CastingKind from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike __all__ = [ @@ -15,19 +16,17 @@ __all__ = [ "vstack", ] -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=generic) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) - -### - +# keep in sync with `numpy.ma.extras.atleast_1d` @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_1d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -35,13 +34,17 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.atleast_2d` @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_2d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -49,13 +52,17 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.atleast_3d` @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... 
+def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_3d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -63,113 +70,118 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# used by numpy.lib._shape_base_impl +def _arrays_for_stack_dispatcher[T](arrays: Sequence[T]) -> tuple[T, ...]: ... + +# keep in sync with `numpy.ma.extras.vstack` @overload -def vstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def vstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... -) -> NDArray[_ScalarT]: ... + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload -def vstack( +def vstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], - casting: _CastingKind = ... -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.hstack` @overload -def hstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def hstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... -) -> NDArray[_ScalarT]: ... + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload -def hstack( +def hstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], - casting: _CastingKind = ... -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.stack` @overload -def stack( - arrays: Sequence[_ArrayLike[_ScalarT]], - axis: SupportsIndex = ..., - out: None = ..., +def stack[ScalarT: np.generic]( + arrays: Sequence[_ArrayLike[ScalarT]], + axis: SupportsIndex = 0, + out: None = None, *, - dtype: None = ..., - casting: _CastingKind = ... -) -> NDArray[_ScalarT]: ... + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload -def stack( +def stack[ScalarT: np.generic]( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: _DTypeLike[_ScalarT], - casting: _CastingKind = ... -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... 
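The `dtype`/`casting` keywords threaded through the `vstack`/`hstack`/`stack` overloads above behave like a fused concatenate-and-cast; when `dtype` is omitted, the common element type is preserved. Doctest-style sketch, illustrative only:

>>> import numpy as np
>>> np.hstack([np.zeros(2, np.float32), np.ones(3, np.float32)]).dtype  # NDArray[float32]
dtype('float32')
>>> np.vstack([[1, 2], [3, 4]], dtype=np.float64)
array([[1., 2.],
       [3., 4.]])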
@overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload -def stack( +def stack[OutT: np.ndarray]( arrays: Sequence[ArrayLike], axis: SupportsIndex, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... @overload -def stack( +def stack[OutT: np.ndarray]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... @overload -def unstack( - array: _ArrayLike[_ScalarT], +def unstack[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], /, *, - axis: int = ..., -) -> tuple[NDArray[_ScalarT], ...]: ... + axis: int = 0, +) -> tuple[NDArray[ScalarT], ...]: ... @overload def unstack( array: ArrayLike, /, *, - axis: int = ..., + axis: int = 0, ) -> tuple[NDArray[Any], ...]: ... @overload -def block(arrays: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def block[ScalarT: np.generic](arrays: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def block(arrays: ArrayLike) -> NDArray[Any]: ... diff --git a/numpy/_core/src/_simd/_simd.c b/numpy/_core/src/_simd/_simd.c index 2f0a5df6375c..d25d7bbf1c38 100644 --- a/numpy/_core/src/_simd/_simd.c +++ b/numpy/_core/src/_simd/_simd.c @@ -88,7 +88,7 @@ PyMODINIT_FUNC PyInit__simd(void) NPY_MTARGETS_CONF_DISPATCH(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) NPY_MTARGETS_CONF_BASELINE(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/distutils/checks/cpu_asimd.c b/numpy/_core/src/_simd/checks/cpu_asimd.c similarity index 100% rename from numpy/distutils/checks/cpu_asimd.c rename to numpy/_core/src/_simd/checks/cpu_asimd.c diff --git a/numpy/distutils/checks/cpu_asimddp.c b/numpy/_core/src/_simd/checks/cpu_asimddp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimddp.c rename to numpy/_core/src/_simd/checks/cpu_asimddp.c diff --git a/numpy/distutils/checks/cpu_asimdfhm.c b/numpy/_core/src/_simd/checks/cpu_asimdfhm.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdfhm.c rename to numpy/_core/src/_simd/checks/cpu_asimdfhm.c diff --git a/numpy/distutils/checks/cpu_asimdhp.c b/numpy/_core/src/_simd/checks/cpu_asimdhp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdhp.c rename to numpy/_core/src/_simd/checks/cpu_asimdhp.c diff --git a/numpy/distutils/checks/cpu_avx.c b/numpy/_core/src/_simd/checks/cpu_avx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx.c rename to numpy/_core/src/_simd/checks/cpu_avx.c diff --git a/numpy/distutils/checks/cpu_avx2.c b/numpy/_core/src/_simd/checks/cpu_avx2.c similarity index 100% rename from numpy/distutils/checks/cpu_avx2.c rename to numpy/_core/src/_simd/checks/cpu_avx2.c diff --git a/numpy/distutils/checks/cpu_avx512_clx.c b/numpy/_core/src/_simd/checks/cpu_avx512_clx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_clx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_clx.c diff --git a/numpy/distutils/checks/cpu_avx512_cnl.c b/numpy/_core/src/_simd/checks/cpu_avx512_cnl.c similarity index 100% rename from 
numpy/distutils/checks/cpu_avx512_cnl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_cnl.c diff --git a/numpy/distutils/checks/cpu_avx512_icl.c b/numpy/_core/src/_simd/checks/cpu_avx512_icl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_icl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_icl.c diff --git a/numpy/distutils/checks/cpu_avx512_knl.c b/numpy/_core/src/_simd/checks/cpu_avx512_knl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_knl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knl.c diff --git a/numpy/distutils/checks/cpu_avx512_knm.c b/numpy/_core/src/_simd/checks/cpu_avx512_knm.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_knm.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knm.c diff --git a/numpy/distutils/checks/cpu_avx512_skx.c b/numpy/_core/src/_simd/checks/cpu_avx512_skx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_skx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_skx.c diff --git a/numpy/distutils/checks/cpu_avx512_spr.c b/numpy/_core/src/_simd/checks/cpu_avx512_spr.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_spr.c rename to numpy/_core/src/_simd/checks/cpu_avx512_spr.c diff --git a/numpy/distutils/checks/cpu_avx512cd.c b/numpy/_core/src/_simd/checks/cpu_avx512cd.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512cd.c rename to numpy/_core/src/_simd/checks/cpu_avx512cd.c diff --git a/numpy/distutils/checks/cpu_avx512f.c b/numpy/_core/src/_simd/checks/cpu_avx512f.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512f.c rename to numpy/_core/src/_simd/checks/cpu_avx512f.c diff --git a/numpy/distutils/checks/cpu_f16c.c b/numpy/_core/src/_simd/checks/cpu_f16c.c similarity index 100% rename from numpy/distutils/checks/cpu_f16c.c rename to numpy/_core/src/_simd/checks/cpu_f16c.c diff --git a/numpy/distutils/checks/cpu_fma3.c b/numpy/_core/src/_simd/checks/cpu_fma3.c similarity index 100% rename from numpy/distutils/checks/cpu_fma3.c rename to numpy/_core/src/_simd/checks/cpu_fma3.c diff --git a/numpy/distutils/checks/cpu_fma4.c b/numpy/_core/src/_simd/checks/cpu_fma4.c similarity index 100% rename from numpy/distutils/checks/cpu_fma4.c rename to numpy/_core/src/_simd/checks/cpu_fma4.c diff --git a/numpy/distutils/checks/cpu_lsx.c b/numpy/_core/src/_simd/checks/cpu_lsx.c similarity index 100% rename from numpy/distutils/checks/cpu_lsx.c rename to numpy/_core/src/_simd/checks/cpu_lsx.c diff --git a/numpy/distutils/checks/cpu_neon.c b/numpy/_core/src/_simd/checks/cpu_neon.c similarity index 100% rename from numpy/distutils/checks/cpu_neon.c rename to numpy/_core/src/_simd/checks/cpu_neon.c diff --git a/numpy/distutils/checks/cpu_neon_fp16.c b/numpy/_core/src/_simd/checks/cpu_neon_fp16.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_fp16.c rename to numpy/_core/src/_simd/checks/cpu_neon_fp16.c diff --git a/numpy/distutils/checks/cpu_neon_vfpv4.c b/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_vfpv4.c rename to numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c diff --git a/numpy/distutils/checks/cpu_popcnt.c b/numpy/_core/src/_simd/checks/cpu_popcnt.c similarity index 100% rename from numpy/distutils/checks/cpu_popcnt.c rename to numpy/_core/src/_simd/checks/cpu_popcnt.c diff --git a/numpy/distutils/checks/cpu_rvv.c b/numpy/_core/src/_simd/checks/cpu_rvv.c similarity index 100% rename from numpy/distutils/checks/cpu_rvv.c 
rename to numpy/_core/src/_simd/checks/cpu_rvv.c diff --git a/numpy/distutils/checks/cpu_sse.c b/numpy/_core/src/_simd/checks/cpu_sse.c similarity index 100% rename from numpy/distutils/checks/cpu_sse.c rename to numpy/_core/src/_simd/checks/cpu_sse.c diff --git a/numpy/distutils/checks/cpu_sse2.c b/numpy/_core/src/_simd/checks/cpu_sse2.c similarity index 100% rename from numpy/distutils/checks/cpu_sse2.c rename to numpy/_core/src/_simd/checks/cpu_sse2.c diff --git a/numpy/distutils/checks/cpu_sse3.c b/numpy/_core/src/_simd/checks/cpu_sse3.c similarity index 100% rename from numpy/distutils/checks/cpu_sse3.c rename to numpy/_core/src/_simd/checks/cpu_sse3.c diff --git a/numpy/distutils/checks/cpu_sse41.c b/numpy/_core/src/_simd/checks/cpu_sse41.c similarity index 100% rename from numpy/distutils/checks/cpu_sse41.c rename to numpy/_core/src/_simd/checks/cpu_sse41.c diff --git a/numpy/distutils/checks/cpu_sse42.c b/numpy/_core/src/_simd/checks/cpu_sse42.c similarity index 100% rename from numpy/distutils/checks/cpu_sse42.c rename to numpy/_core/src/_simd/checks/cpu_sse42.c diff --git a/numpy/distutils/checks/cpu_ssse3.c b/numpy/_core/src/_simd/checks/cpu_ssse3.c similarity index 100% rename from numpy/distutils/checks/cpu_ssse3.c rename to numpy/_core/src/_simd/checks/cpu_ssse3.c diff --git a/numpy/distutils/checks/cpu_sve.c b/numpy/_core/src/_simd/checks/cpu_sve.c similarity index 100% rename from numpy/distutils/checks/cpu_sve.c rename to numpy/_core/src/_simd/checks/cpu_sve.c diff --git a/numpy/distutils/checks/cpu_vsx.c b/numpy/_core/src/_simd/checks/cpu_vsx.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx.c rename to numpy/_core/src/_simd/checks/cpu_vsx.c diff --git a/numpy/distutils/checks/cpu_vsx2.c b/numpy/_core/src/_simd/checks/cpu_vsx2.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx2.c rename to numpy/_core/src/_simd/checks/cpu_vsx2.c diff --git a/numpy/distutils/checks/cpu_vsx3.c b/numpy/_core/src/_simd/checks/cpu_vsx3.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx3.c rename to numpy/_core/src/_simd/checks/cpu_vsx3.c diff --git a/numpy/distutils/checks/cpu_vsx4.c b/numpy/_core/src/_simd/checks/cpu_vsx4.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx4.c rename to numpy/_core/src/_simd/checks/cpu_vsx4.c diff --git a/numpy/distutils/checks/cpu_vx.c b/numpy/_core/src/_simd/checks/cpu_vx.c similarity index 100% rename from numpy/distutils/checks/cpu_vx.c rename to numpy/_core/src/_simd/checks/cpu_vx.c diff --git a/numpy/distutils/checks/cpu_vxe.c b/numpy/_core/src/_simd/checks/cpu_vxe.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe.c rename to numpy/_core/src/_simd/checks/cpu_vxe.c diff --git a/numpy/distutils/checks/cpu_vxe2.c b/numpy/_core/src/_simd/checks/cpu_vxe2.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe2.c rename to numpy/_core/src/_simd/checks/cpu_vxe2.c diff --git a/numpy/distutils/checks/cpu_xop.c b/numpy/_core/src/_simd/checks/cpu_xop.c similarity index 100% rename from numpy/distutils/checks/cpu_xop.c rename to numpy/_core/src/_simd/checks/cpu_xop.c diff --git a/numpy/distutils/checks/extra_avx512bw_mask.c b/numpy/_core/src/_simd/checks/extra_avx512bw_mask.c similarity index 100% rename from numpy/distutils/checks/extra_avx512bw_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512bw_mask.c diff --git a/numpy/distutils/checks/extra_avx512dq_mask.c b/numpy/_core/src/_simd/checks/extra_avx512dq_mask.c similarity index 100% rename from 
numpy/distutils/checks/extra_avx512dq_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512dq_mask.c diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/_core/src/_simd/checks/extra_avx512f_reduce.c similarity index 100% rename from numpy/distutils/checks/extra_avx512f_reduce.c rename to numpy/_core/src/_simd/checks/extra_avx512f_reduce.c diff --git a/numpy/distutils/checks/extra_vsx3_half_double.c b/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c similarity index 100% rename from numpy/distutils/checks/extra_vsx3_half_double.c rename to numpy/_core/src/_simd/checks/extra_vsx3_half_double.c diff --git a/numpy/distutils/checks/extra_vsx4_mma.c b/numpy/_core/src/_simd/checks/extra_vsx4_mma.c similarity index 100% rename from numpy/distutils/checks/extra_vsx4_mma.c rename to numpy/_core/src/_simd/checks/extra_vsx4_mma.c diff --git a/numpy/distutils/checks/extra_vsx_asm.c b/numpy/_core/src/_simd/checks/extra_vsx_asm.c similarity index 100% rename from numpy/distutils/checks/extra_vsx_asm.c rename to numpy/_core/src/_simd/checks/extra_vsx_asm.c diff --git a/numpy/distutils/checks/test_flags.c b/numpy/_core/src/_simd/checks/test_flags.c similarity index 100% rename from numpy/distutils/checks/test_flags.c rename to numpy/_core/src/_simd/checks/test_flags.c diff --git a/numpy/_core/src/common/array_assign.h b/numpy/_core/src/common/array_assign.h index 8a28ed1d3a01..cc5f044ef080 100644 --- a/numpy/_core/src/common/array_assign.h +++ b/numpy/_core/src/common/array_assign.h @@ -46,7 +46,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, NPY_NO_EXPORT int raw_array_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data); + PyArray_Descr *src_dtype, char *src_data, NPY_CASTING casting); /* * Assigns the scalar value to every element of the destination raw array @@ -59,7 +59,7 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides); + npy_intp const *wheremask_strides, NPY_CASTING casting); /******** LOW-LEVEL ARRAY MANIPULATION HELPERS ********/ diff --git a/numpy/_core/src/common/binop_override.h b/numpy/_core/src/common/binop_override.h index a6b4747ca560..e17b147c1d0a 100644 --- a/numpy/_core/src/common/binop_override.h +++ b/numpy/_core/src/common/binop_override.h @@ -1,8 +1,8 @@ #ifndef NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ #define NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ -#include #include +#include #include "numpy/arrayobject.h" #include "get_attr_string.h" diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c new file mode 100644 index 000000000000..43d4b1e845f0 --- /dev/null +++ b/numpy/_core/src/common/blas_utils.c @@ -0,0 +1,61 @@ +#include "numpy/npy_math.h" // npy_get_floatstatus_barrier +#include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN +#include "blas_utils.h" +#include "npy_cblas.h" + +#include +#include +#include + +#ifdef __APPLE__ +#include +#endif + +#if NPY_BLAS_CHECK_FPE_SUPPORT +/* + * Static variable to cache runtime check of BLAS FPE support. 
+ * Will always be false (ignore all FPE) when Accelerate is the compiled backend + */ + #if defined(ACCELERATE_NEW_LAPACK) +static bool blas_supports_fpe = false; + #else +static bool blas_supports_fpe = true; + #endif // ACCELERATE_NEW_LAPACK + +#endif // NPY_BLAS_CHECK_FPE_SUPPORT + + +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + return blas_supports_fpe; +#else + return true; +#endif +} + +NPY_VISIBILITY_HIDDEN bool +npy_set_blas_supports_fpe(bool value) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + blas_supports_fpe = (bool)value; + return blas_supports_fpe; +#endif + return true; // input ignored; the runtime check is not set up on this platform +} + +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + if (!blas_supports_fpe){ + // BLAS does not support FPE, but the caller expects an FPE state. + // Instead of clearing and then grabbing state, just return + // that no flags are set. + return 0; + } +#endif + char *param = NULL; + return npy_get_floatstatus_barrier(param); +} diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h new file mode 100644 index 000000000000..79d1e5ce274c --- /dev/null +++ b/numpy/_core/src/common/blas_utils.h @@ -0,0 +1,33 @@ +#include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN + +#include + +/* + * NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check + * for floating-point error (FPE) support in BLAS. + * The known culprit right now is SVM, likely only on macOS, but that is not + * entirely clear. + * This check always runs on ARM (it is a small check overall). + */ +#if defined(__APPLE__) && defined(__aarch64__) && defined(HAVE_CBLAS) +#define NPY_BLAS_CHECK_FPE_SUPPORT 1 +#else +#define NPY_BLAS_CHECK_FPE_SUPPORT 0 +#endif + +/* Runtime check for whether BLAS supports floating-point errors. + * true - BLAS supports FPE and one can rely on them to indicate errors + * false - BLAS does not support FPE. Special handling needed for FPE state + */ +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void); + +/* Allow setting the BLAS FPE flag from Python. */ +NPY_VISIBILITY_HIDDEN bool +npy_set_blas_supports_fpe(bool value); + +/* If BLAS supports FPE, exactly the same as npy_get_floatstatus_barrier(). + * Otherwise, we can't rely on FPE state and need special handling.
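+ *
+ * A usage sketch (mirroring the cblasfuncs.c change below; illustrative,
+ * not part of this header): call it right after a CBLAS call, where
+ * npy_get_floatstatus_barrier() would otherwise be used:
+ *
+ *     int fpes = npy_get_floatstatus_after_blas();
+ *     if (fpes && PyUFunc_GiveFloatingpointErrors("dot", fpes) < 0) {
+ *         goto fail;
+ *     }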
+ */ +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void); diff --git a/numpy/_core/src/common/cblasfuncs.c b/numpy/_core/src/common/cblasfuncs.c index f9d683d812d4..66a215dfeb64 100644 --- a/numpy/_core/src/common/cblasfuncs.c +++ b/numpy/_core/src/common/cblasfuncs.c @@ -12,6 +12,7 @@ #include "numpy/arrayobject.h" #include "numpy/npy_math.h" #include "numpy/ufuncobject.h" +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" #include "common.h" @@ -693,7 +694,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, NPY_END_ALLOW_THREADS; } - int fpes = npy_get_floatstatus_barrier((char *) result); + int fpes = npy_get_floatstatus_after_blas(); if (fpes && PyUFunc_GiveFloatingpointErrors("dot", fpes) < 0) { goto fail; } diff --git a/numpy/_core/src/common/dlpack/dlpack.h b/numpy/_core/src/common/dlpack/dlpack.h index 19ecc27761f8..4dc164fe9c1b 100644 --- a/numpy/_core/src/common/dlpack/dlpack.h +++ b/numpy/_core/src/common/dlpack/dlpack.h @@ -270,7 +270,7 @@ typedef struct DLManagedTensor { void (*deleter)(struct DLManagedTensor * self); } DLManagedTensor; -// bit masks used in in the DLManagedTensorVersioned +// bit masks used in the DLManagedTensorVersioned /*! \brief bit mask to indicate that the tensor is read only. */ #define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) diff --git a/numpy/_core/src/common/half.hpp b/numpy/_core/src/common/half.hpp index 484750ad84cd..14dabbe79d7f 100644 --- a/numpy/_core/src/common/half.hpp +++ b/numpy/_core/src/common/half.hpp @@ -9,8 +9,6 @@ // TODO(@seiko2plus): // - covers half-precision operations that being supported by numpy/halffloat.h // - add support for arithmetic operations -// - enables __fp16 causes massive FP exceptions on aarch64, -// needs a deep investigation namespace np { @@ -19,42 +17,19 @@ namespace np { /// Provides a type that implements 16-bit floating point (half-precision). /// This type is ensured to be 16-bit size. -#if 1 // ndef __ARM_FP16_FORMAT_IEEE class Half final { public: - /// Whether `Half` has a full native HW support. - static constexpr bool kNative = false; - /// Whether `Half` has a native HW support for single/double conversion. - template - static constexpr bool kNativeConversion = ( - ( - std::is_same_v && - #if defined(NPY_HAVE_FP16) || defined(NPY_HAVE_VSX3) - true - #else - false - #endif - ) || ( - std::is_same_v && - #if defined(NPY_HAVE_AVX512FP16) || (defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE)) - true - #else - false - #endif - ) - ); - /// Default constructor. initialize nothing. Half() = default; /// Construct from float /// If there are no hardware optimization available, rounding will always /// be set to ties to even. - explicit Half(float f) + NPY_FINLINE explicit Half(float f) { #if defined(NPY_HAVE_FP16) __m128 mf = _mm_load_ss(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT))); + bits_ = _mm_extract_epi16(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX_ASM) __vector float vf32 = vec_splats(f); __vector unsigned short vf16; @@ -64,6 +39,9 @@ class Half final { #else bits_ = vec_extract(vf16, 0); #endif + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromFloatBits(BitCast(f)); #endif @@ -72,20 +50,23 @@ class Half final { /// Construct from double. /// If there are no hardware optimization available, rounding will always /// be set to ties to even. 
- explicit Half(double f) + NPY_FINLINE explicit Half(double f) { #if defined(NPY_HAVE_AVX512FP16) __m128d md = _mm_load_sd(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_castph_si128(_mm_cvtpd_ph(md)))); + bits_ = _mm_extract_epi16(_mm_castph_si128(_mm_cvtpd_ph(md)), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits_) : "wa" (f)); + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromDoubleBits(BitCast(f)); #endif } /// Cast to float - explicit operator float() const + NPY_FINLINE explicit operator float() const { #if defined(NPY_HAVE_FP16) float ret; @@ -99,13 +80,15 @@ class Half final { : "=wa"(vf32) : "wa"(vec_splats(bits_))); return vec_extract(vf32, 0); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return float(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToFloatBits(bits_)); #endif } /// Cast to double - explicit operator double() const + NPY_FINLINE explicit operator double() const { #if defined(NPY_HAVE_AVX512FP16) double ret; @@ -117,6 +100,8 @@ class Half final { : "=wa"(f64) : "wa"(bits_)); return f64; + #elif defined(__ARM_FP16_FORMAT_IEEE) + return double(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToDoubleBits(bits_)); #endif @@ -223,40 +208,6 @@ class Half final { private: uint16_t bits_; }; -#else // __ARM_FP16_FORMAT_IEEE -class Half final { - public: - static constexpr bool kNative = true; - template - static constexpr bool kNativeConversion = ( - std::is_same_v || std::is_same_v - ); - Half() = default; - constexpr Half(__fp16 h) : half_(h) - {} - constexpr operator __fp16() const - { return half_; } - static Half FromBits(uint16_t bits) - { - Half h; - h.half_ = BitCast<__fp16>(bits); - return h; - } - uint16_t Bits() const - { return BitCast(half_); } - constexpr bool Less(Half r) const - { return half_ < r.half_; } - constexpr bool LessEqual(Half r) const - { return half_ <= r.half_; } - constexpr bool Equal(Half r) const - { return half_ == r.half_; } - constexpr bool IsNaN() const - { return half_ != half_; } - - private: - __fp16 half_; -}; -#endif // __ARM_FP16_FORMAT_IEEE /// @} cpp_core_types diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 6766b17043ac..aa011be9c585 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -243,16 +243,18 @@ static int raise_incorrect_number_of_positional_args(const char *funcname, const _NpyArgParserCache *cache, Py_ssize_t len_args) { + const char *verb = (len_args == 1) ? 
"was" : "were"; if (cache->npositional == cache->nrequired) { PyErr_Format(PyExc_TypeError, - "%s() takes %d positional arguments but %zd were given", - funcname, cache->npositional, len_args); + "%s() takes %d positional arguments but %zd %s given", + funcname, cache->npositional, len_args, verb); } else { PyErr_Format(PyExc_TypeError, "%s() takes from %d to %d positional arguments but " - "%zd were given", - funcname, cache->nrequired, cache->npositional, len_args); + "%zd %s given", + funcname, cache->nrequired, cache->npositional, + len_args, verb); } return -1; } diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index f5b41d7068be..61a31acc13e0 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -79,6 +79,12 @@ npy_atomic_load_ptr(const void *obj) { #endif } +static inline npy_hash_t +npy_atomic_load_hash_t(const npy_hash_t *obj) { + assert(sizeof(npy_hash_t) == sizeof(void *)); + return (npy_hash_t)npy_atomic_load_ptr((const void *)obj); +} + static inline void npy_atomic_store_uint8(npy_uint8 *obj, npy_uint8 value) { #ifdef STDC_ATOMICS @@ -104,6 +110,12 @@ npy_atomic_store_ptr(void *obj, void *value) #endif } +static inline void +npy_atomic_store_hash_t(npy_hash_t *obj, npy_hash_t value) { + assert(sizeof(npy_hash_t) == sizeof(void *)); + npy_atomic_store_ptr((void *)obj, (void *)value); +} + #undef MSC_ATOMICS #undef STDC_ATOMICS #undef GCC_ATOMICS diff --git a/numpy/_core/src/common/npy_config.h b/numpy/_core/src/common/npy_config.h index 82641a85509e..ccb81ca7110b 100644 --- a/numpy/_core/src/common/npy_config.h +++ b/numpy/_core/src/common/npy_config.h @@ -1,6 +1,11 @@ #ifndef NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ #define NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ +#if defined(_MSC_VER) +// Suppress warn C4146: -x is valid for unsigned (wraps around) +#pragma warning(disable:4146) +#endif + #include "config.h" #include "npy_cpu_dispatch.h" // brings NPY_HAVE_[CPU features] #include "numpy/numpyconfig.h" diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index ff22f234a7c6..2cb3cd817d2a 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -9,7 +9,7 @@ NPY_VISIBILITY_HIDDEN int npy_cpu_dispatch_tracer_init(PyObject *mod) { if (npy_static_pydata.cpu_dispatch_registry != NULL) { - PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initlized"); + PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initialized"); return -1; } PyObject *mod_dict = PyModule_GetDict(mod); @@ -33,7 +33,7 @@ NPY_VISIBILITY_HIDDEN void npy_cpu_dispatch_trace(const char *fname, const char *signature, const char **dispatch_info) { - PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); + PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); // noqa: borrowed-ref OK if (func_dict == NULL) { func_dict = PyDict_New(); if (func_dict == NULL) { diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 8810182812e5..91dafa96de0a 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -80,12 +80,23 @@ static struct { {NPY_CPU_FEATURE_SSE41, "SSE41"}, {NPY_CPU_FEATURE_POPCNT, "POPCNT"}, {NPY_CPU_FEATURE_SSE42, "SSE42"}, + {NPY_CPU_FEATURE_X86_V2, "X86_V2"}, {NPY_CPU_FEATURE_AVX, "AVX"}, {NPY_CPU_FEATURE_F16C, "F16C"}, {NPY_CPU_FEATURE_XOP, "XOP"}, 
{NPY_CPU_FEATURE_FMA4, "FMA4"}, {NPY_CPU_FEATURE_FMA3, "FMA3"}, {NPY_CPU_FEATURE_AVX2, "AVX2"}, + {NPY_CPU_FEATURE_LAHF, "LAHF"}, + {NPY_CPU_FEATURE_CX16, "CX16"}, + {NPY_CPU_FEATURE_MOVBE, "MOVBE"}, + {NPY_CPU_FEATURE_BMI, "BMI"}, + {NPY_CPU_FEATURE_BMI2, "BMI2"}, + {NPY_CPU_FEATURE_LZCNT, "LZCNT"}, + {NPY_CPU_FEATURE_GFNI, "GFNI"}, + {NPY_CPU_FEATURE_VPCLMULQDQ, "VPCLMULQDQ"}, + {NPY_CPU_FEATURE_VAES, "VAES"}, + {NPY_CPU_FEATURE_X86_V3, "X86_V3"}, {NPY_CPU_FEATURE_AVX512F, "AVX512F"}, {NPY_CPU_FEATURE_AVX512CD, "AVX512CD"}, {NPY_CPU_FEATURE_AVX512ER, "AVX512ER"}, @@ -102,9 +113,11 @@ static struct { {NPY_CPU_FEATURE_AVX512VBMI2, "AVX512VBMI2"}, {NPY_CPU_FEATURE_AVX512BITALG, "AVX512BITALG"}, {NPY_CPU_FEATURE_AVX512FP16 , "AVX512FP16"}, + {NPY_CPU_FEATURE_AVX512BF16 , "AVX512BF16"}, {NPY_CPU_FEATURE_AVX512_KNL, "AVX512_KNL"}, {NPY_CPU_FEATURE_AVX512_KNM, "AVX512_KNM"}, {NPY_CPU_FEATURE_AVX512_SKX, "AVX512_SKX"}, + {NPY_CPU_FEATURE_X86_V4, "X86_V4"}, {NPY_CPU_FEATURE_AVX512_CLX, "AVX512_CLX"}, {NPY_CPU_FEATURE_AVX512_CNL, "AVX512_CNL"}, {NPY_CPU_FEATURE_AVX512_ICL, "AVX512_ICL"}, @@ -246,7 +259,7 @@ npy__cpu_validate_baseline(void) static int npy__cpu_check_env(int disable, const char *env) { - static const char *names[] = { + static const char *const names[] = { "enable", "disable", "NPY_ENABLE_CPU_FEATURES", "NPY_DISABLE_CPU_FEATURES", "During parsing environment variable: 'NPY_ENABLE_CPU_FEATURES':\n", @@ -277,7 +290,7 @@ npy__cpu_check_env(int disable, const char *env) { char *notsupp_cur = ¬supp[0]; //comma and space including (htab, vtab, CR, LF, FF) - const char *delim = ", \t\v\r\n\f"; + const char delim[] = ", \t\v\r\n\f"; char *feature = strtok(features, delim); while (feature) { if (npy__cpu_baseline_fid(feature) > 0){ @@ -398,12 +411,18 @@ npy__cpu_getxcr0(void) } static void -npy__cpu_cpuid(int reg[4], int func_id) +npy__cpu_cpuid_count(int reg[4], int func_id, int count) { #if defined(_MSC_VER) - __cpuidex(reg, func_id, 0); + __cpuidex(reg, func_id, count); #elif defined(__INTEL_COMPILER) __cpuid(reg, func_id); + // classic Intel compilers do not support count + if (count != 0) { + for (int i = 0; i < 4; i++) { + reg[i] = 0; + } + } #elif defined(__GNUC__) || defined(__clang__) #if defined(NPY_CPU_X86) && defined(__PIC__) // %ebx may be the PIC register @@ -412,13 +431,13 @@ npy__cpu_cpuid(int reg[4], int func_id) "xchg{l}\t{%%}ebx, %1\n\t" : "=a" (reg[0]), "=r" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) - : "a" (func_id), "c" (0) + : "a" (func_id), "c" (count) ); #else __asm__("cpuid\n\t" : "=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) - : "a" (func_id), "c" (0) + : "a" (func_id), "c" (count) ); #endif #else @@ -426,6 +445,12 @@ npy__cpu_cpuid(int reg[4], int func_id) #endif } +static void +npy__cpu_cpuid(int reg[4], int func_id) +{ + return npy__cpu_cpuid_count(reg, func_id, 0); +} + static void npy__cpu_init_features(void) { @@ -441,7 +466,13 @@ npy__cpu_init_features(void) #ifdef NPY_CPU_AMD64 npy__cpu_have[NPY_CPU_FEATURE_SSE3] = 1; #endif - return; + // For unsupported compilers, we default to NPY_CPU_X86_V2 availability + // as this is the minimum baseline required to bypass initial capability checks. + // However, we deliberately don't set any additional CPU feature flags, + // allowing us to detect this fallback behavior later via the Python + // __cpu_features__ dictionary. 
+ npy__cpu_have[NPY_CPU_FEATURE_X86_V2] = 1; + return; } npy__cpu_cpuid(reg, 1); @@ -453,36 +484,42 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_SSE41] = (reg[2] & (1 << 19)) != 0; npy__cpu_have[NPY_CPU_FEATURE_POPCNT] = (reg[2] & (1 << 23)) != 0; npy__cpu_have[NPY_CPU_FEATURE_SSE42] = (reg[2] & (1 << 20)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_CX16] = (reg[2] & (1 << 13)) != 0; npy__cpu_have[NPY_CPU_FEATURE_F16C] = (reg[2] & (1 << 29)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_MOVBE] = (reg[2] & (1 << 22)) != 0; - // check OSXSAVE - if ((reg[2] & (1 << 27)) == 0) - return; - // check AVX OS support - int xcr = npy__cpu_getxcr0(); - if ((xcr & 6) != 6) - return; - npy__cpu_have[NPY_CPU_FEATURE_AVX] = (reg[2] & (1 << 28)) != 0; - if (!npy__cpu_have[NPY_CPU_FEATURE_AVX]) - return; + int osxsave = (reg[2] & (1 << 27)) != 0; + int xcr = 0; + if (osxsave) { + xcr = npy__cpu_getxcr0(); + } + int avx_os = (xcr & 6) == 6; + npy__cpu_have[NPY_CPU_FEATURE_AVX] = (reg[2] & (1 << 28)) != 0 && avx_os; npy__cpu_have[NPY_CPU_FEATURE_FMA3] = (reg[2] & (1 << 12)) != 0; // second call to the cpuid to get extended AMD feature bits npy__cpu_cpuid(reg, 0x80000001); - npy__cpu_have[NPY_CPU_FEATURE_XOP] = (reg[2] & (1 << 11)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_FMA4] = (reg[2] & (1 << 16)) != 0; +#ifdef NPY_CPU_AMD64 + // long mode only + npy__cpu_have[NPY_CPU_FEATURE_LAHF] = (reg[2] & (1 << 0)) != 0; +#else + // always available + npy__cpu_have[NPY_CPU_FEATURE_LAHF] = 1; +#endif + npy__cpu_have[NPY_CPU_FEATURE_LZCNT] = (reg[2] & (1 << 5)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_POPCNT] |= npy__cpu_have[NPY_CPU_FEATURE_LZCNT]; + npy__cpu_have[NPY_CPU_FEATURE_XOP] = (reg[2] & (1 << 11)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX]; + npy__cpu_have[NPY_CPU_FEATURE_FMA4] = (reg[2] & (1 << 16)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX]; // third call to the cpuid to get extended AVX2 & AVX512 feature bits npy__cpu_cpuid(reg, 7); - npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX2] = npy__cpu_have[NPY_CPU_FEATURE_AVX2] && - npy__cpu_have[NPY_CPU_FEATURE_FMA3]; - if (!npy__cpu_have[NPY_CPU_FEATURE_AVX2]) - return; - // detect AVX2 & FMA3 - npy__cpu_have[NPY_CPU_FEATURE_FMA] = npy__cpu_have[NPY_CPU_FEATURE_FMA3]; + npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX]; + npy__cpu_have[NPY_CPU_FEATURE_BMI] = (reg[1] & (1 << 3)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_BMI2] = (reg[1] & (1 << 8)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_BMI]; + npy__cpu_have[NPY_CPU_FEATURE_GFNI] = (reg[2] & (1 << 8)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_VAES] = (reg[2] & (1 << 9)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ] = (reg[2] & (1 << 10)) != 0; - // check AVX512 OS support int avx512_os = (xcr & 0xe6) == 0xe6; #if defined(__APPLE__) && defined(__x86_64__) /** * - https://github.com/golang/go/issues/43089 * - https://github.com/numpy/numpy/issues/19319 */ - if (!avx512_os) { + if (!avx512_os && avx_os) { npy_uintp commpage64_addr = 0x00007fffffe00000ULL; npy_uint16 commpage64_ver = *((npy_uint16*)(commpage64_addr + 0x01E)); // cpu_capabilities64 undefined in versions < 13 @@ -504,65 +541,110 @@ npy__cpu_init_features(void) - if (!avx512_os) { - return; - } - npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0; - if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F] &&
npy__cpu_have[NPY_CPU_FEATURE_AVX512CD]) { + npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0 && avx512_os; + if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F]) { + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0; // Knights Landing npy__cpu_have[NPY_CPU_FEATURE_AVX512PF] = (reg[1] & (1 << 26)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] = (reg[1] & (1 << 27)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512PF]; // Knights Mill npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ] = (reg[2] & (1 << 14)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] = (reg[3] & (1 << 2)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] = (reg[3] & (1 << 3)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNM] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] && - npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; - // Skylake-X npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] = (reg[1] & (1 << 17)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] = (reg[1] & (1 << 30)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (1 << 31)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VL]; + // cast and use of unsigned int literal silences UBSan warning: + // "runtime error: left shift of 1 by 31 places cannot be represented in type 'int'" + npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (int)(1u << 31)) != 0; // Cascade Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] = (reg[2] & (1 << 11)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI]; - // Cannon Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] = (reg[1] & (1 << 21)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI] = (reg[2] & (1 << 1)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI]; // Ice Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] = (reg[2] & (1 << 6)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] = (reg[2] & (1 << 12)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; // Sapphire Rapids - npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16]; - + npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0; + npy__cpu_cpuid_count(reg, 7, 1); + npy__cpu_have[NPY_CPU_FEATURE_AVX512BF16] = (reg[0] & (1 << 5)) != 0; } + + // Groups + npy__cpu_have[NPY_CPU_FEATURE_X86_V2] = npy__cpu_have[NPY_CPU_FEATURE_SSE] && + npy__cpu_have[NPY_CPU_FEATURE_SSE2] && + npy__cpu_have[NPY_CPU_FEATURE_SSE3] && + npy__cpu_have[NPY_CPU_FEATURE_SSSE3] && + npy__cpu_have[NPY_CPU_FEATURE_SSE41] && + npy__cpu_have[NPY_CPU_FEATURE_SSE42] && + npy__cpu_have[NPY_CPU_FEATURE_POPCNT] && + #ifdef NPY_CPU_AMD64 + npy__cpu_have[NPY_CPU_FEATURE_CX16] && + #endif + npy__cpu_have[NPY_CPU_FEATURE_LAHF]; + + 
npy__cpu_have[NPY_CPU_FEATURE_X86_V3] = npy__cpu_have[NPY_CPU_FEATURE_X86_V2] && + npy__cpu_have[NPY_CPU_FEATURE_AVX] && + npy__cpu_have[NPY_CPU_FEATURE_AVX2] && + npy__cpu_have[NPY_CPU_FEATURE_F16C] && + npy__cpu_have[NPY_CPU_FEATURE_FMA3] && + npy__cpu_have[NPY_CPU_FEATURE_BMI] && + npy__cpu_have[NPY_CPU_FEATURE_BMI2] && + npy__cpu_have[NPY_CPU_FEATURE_LZCNT] && + npy__cpu_have[NPY_CPU_FEATURE_MOVBE]; + + + npy__cpu_have[NPY_CPU_FEATURE_X86_V4] = npy__cpu_have[NPY_CPU_FEATURE_X86_V3] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VL]; + + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ] && + npy__cpu_have[NPY_CPU_FEATURE_GFNI] && + npy__cpu_have[NPY_CPU_FEATURE_VAES] && + npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512BF16]; + + + + // Legacy groups + npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512PF]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNM] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] && + npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] && + npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI]; + } /***************** POWER ******************/ #elif defined(NPY_CPU_PPC64) || defined(NPY_CPU_PPC64LE) -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __FreeBSD__ #include // defines PPC_FEATURE_HAS_VSX #endif @@ -585,7 +667,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __linux__ unsigned int hwcap = getauxval(AT_HWCAP); if ((hwcap & PPC_FEATURE_HAS_VSX) == 0) @@ -612,7 +694,7 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_VSX2] = (hwcap & PPC_FEATURE2_ARCH_2_07) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX3] = (hwcap & PPC_FEATURE2_ARCH_3_00) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX4] = (hwcap & PPC_FEATURE2_ARCH_3_1) != 0; -// TODO: AIX, OpenBSD +// TODO: AIX #else npy__cpu_have[NPY_CPU_FEATURE_VSX] = 1; #if defined(NPY_CPU_PPC64LE) || defined(NPY_HAVE_VSX2) @@ -698,7 +780,7 @@ npy__cpu_init_features_arm8(void) npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = 1; } -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) /* * we aren't sure of what kind kernel or clib we deal with 
* so we play it safe @@ -709,7 +791,7 @@ npy__cpu_init_features_arm8(void) #if defined(__linux__) __attribute__((weak)) unsigned long getauxval(unsigned long); // linker should handle it #endif -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) || defined(__OpenBSD__) __attribute__((weak)) int elf_aux_info(int, void *, int); // linker should handle it static unsigned long getauxval(unsigned long k) @@ -807,7 +889,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#ifdef __linux__ +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) if (npy__cpu_init_features_linux()) return; #endif @@ -846,22 +928,30 @@ npy__cpu_init_features(void) #elif defined(__riscv) && __riscv_xlen == 64 -#include +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) + #include -#ifndef HWCAP_RVV - // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 - #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) + #ifndef HWCAP_RVV + // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 + #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) + #endif #endif static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); - +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) +#ifdef __linux__ unsigned int hwcap = getauxval(AT_HWCAP); +#else + unsigned long hwcap; + elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)); +#endif if (hwcap & COMPAT_HWCAP_ISA_V) { npy__cpu_have[NPY_CPU_FEATURE_RVV] = 1; } +#endif } /*********** Unsupported ARCH ***********/ diff --git a/numpy/_core/src/common/npy_cpu_features.h b/numpy/_core/src/common/npy_cpu_features.h index 7d6a406f8789..de05a17afdb8 100644 --- a/numpy/_core/src/common/npy_cpu_features.h +++ b/numpy/_core/src/common/npy_cpu_features.h @@ -26,8 +26,15 @@ enum npy_cpu_features NPY_CPU_FEATURE_FMA4 = 12, NPY_CPU_FEATURE_FMA3 = 13, NPY_CPU_FEATURE_AVX2 = 14, - NPY_CPU_FEATURE_FMA = 15, // AVX2 & FMA3, provides backward compatibility - + NPY_CPU_FEATURE_LAHF = 15, + NPY_CPU_FEATURE_CX16 = 16, + NPY_CPU_FEATURE_MOVBE = 17, + NPY_CPU_FEATURE_BMI = 18, + NPY_CPU_FEATURE_BMI2 = 19, + NPY_CPU_FEATURE_LZCNT = 20, + NPY_CPU_FEATURE_GFNI = 21, + NPY_CPU_FEATURE_VAES = 22, + NPY_CPU_FEATURE_VPCLMULQDQ = 23, NPY_CPU_FEATURE_AVX512F = 30, NPY_CPU_FEATURE_AVX512CD = 31, NPY_CPU_FEATURE_AVX512ER = 32, @@ -44,6 +51,8 @@ enum npy_cpu_features NPY_CPU_FEATURE_AVX512VBMI2 = 43, NPY_CPU_FEATURE_AVX512BITALG = 44, NPY_CPU_FEATURE_AVX512FP16 = 45, + NPY_CPU_FEATURE_AVX512BF16 = 46, + // X86 CPU Groups // Knights Landing (F,CD,ER,PF) @@ -56,10 +65,17 @@ enum npy_cpu_features NPY_CPU_FEATURE_AVX512_CLX = 104, // Cannon Lake (F,CD,BW,DQ,VL,IFMA,VBMI) NPY_CPU_FEATURE_AVX512_CNL = 105, - // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ) + // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ,GFNI,VPCLMULDQ,VAES) NPY_CPU_FEATURE_AVX512_ICL = 106, - // Sapphire Rapids (Ice Lake, AVX512FP16) + // Sapphire Rapids (Ice Lake, AVX512FP16, AVX512BF16) NPY_CPU_FEATURE_AVX512_SPR = 107, + // x86-64-v2 microarchitectures (SSE[1-4.*], POPCNT, LAHF, CX16) + // On 32-bit, cx16 is not available so it is not included + NPY_CPU_FEATURE_X86_V2 = 108, + // x86-64-v3 microarchitectures (X86_V2, AVX, AVX2, FMA3, BMI, BMI2, LZCNT, F16C, MOVBE) + NPY_CPU_FEATURE_X86_V3 = 109, + // x86-64-v4 microarchitectures (X86_V3, AVX512F, AVX512CD, AVX512VL, AVX512BW, AVX512DQ) + 
NPY_CPU_FEATURE_X86_V4 = NPY_CPU_FEATURE_AVX512_SKX, // IBM/POWER VSX // POWER7 diff --git a/numpy/_core/src/common/npy_hashtable.cpp b/numpy/_core/src/common/npy_hashtable.cpp index ffd67d403853..27e014ca00e0 100644 --- a/numpy/_core/src/common/npy_hashtable.cpp +++ b/numpy/_core/src/common/npy_hashtable.cpp @@ -240,3 +240,20 @@ PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) PyObject *res = find_item(tb, key)[0]; return res; } + +#ifdef Py_GIL_DISABLED + +NPY_NO_EXPORT PyObject * +PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key) +{ + PyObject *res; + std::shared_mutex *mutex = (std::shared_mutex *)tb->mutex; + NPY_BEGIN_ALLOW_THREADS + mutex->lock_shared(); + NPY_END_ALLOW_THREADS + res = find_item(tb, key)[0]; + mutex->unlock_shared(); + return res; +} + +#endif // Py_GIL_DISABLED diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index cd061ba6fa11..02acc12d3191 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -18,7 +18,7 @@ typedef struct { npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ #ifdef Py_GIL_DISABLED - void *mutex; + void *mutex; /* std::shared_mutex, prevents races to fill the cache */ #endif } PyArrayIdentityHash; @@ -27,6 +27,11 @@ NPY_NO_EXPORT int PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject *const *key, PyObject *value, int replace); +#ifdef Py_GIL_DISABLED +NPY_NO_EXPORT PyObject * +PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key); +#endif // Py_GIL_DISABLED + NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key); diff --git a/numpy/_core/src/common/npy_import.c b/numpy/_core/src/common/npy_import.c index cff071e9b522..a0308ff3e4c7 100644 --- a/numpy/_core/src/common/npy_import.c +++ b/numpy/_core/src/common/npy_import.c @@ -19,3 +19,44 @@ init_import_mutex(void) { #endif return 0; } + + +/*! \brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. + */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point) { + PyObject *result; + const char *item; + + const char *colon = strchr(entry_point, ':'); + if (colon) { // there is a module. + result = PyUnicode_FromStringAndSize(entry_point, colon - entry_point); + if (result != NULL) { + Py_SETREF(result, PyImport_Import(result)); + } + item = colon + 1; + } + else { + result = PyImport_ImportModule("numpy"); + item = entry_point; + } + + const char *dot = item - 1; + while (result != NULL && dot != NULL) { + item = dot + 1; + dot = strchr(item, '.'); + PyObject *string = PyUnicode_FromStringAndSize( + item, dot ? dot - item : strlen(item)); + if (string == NULL) { + Py_DECREF(result); + return NULL; + } + Py_SETREF(result, PyObject_GetAttr(result, string)); + Py_DECREF(string); + } + return result; +} diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 970efa8f549e..fec1b22f3975 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -6,6 +6,10 @@ #include "numpy/npy_common.h" #include "npy_atomic.h" +#ifdef __cplusplus +extern "C" { +#endif + /* * Cached references to objects obtained via an import. All of these are * can be initialized at any time by npy_cache_import_runtime. 
@@ -41,9 +45,13 @@ typedef struct npy_runtime_imports_struct { PyObject *_std; PyObject *_sum; PyObject *_ufunc_doc_signature_formatter; + PyObject *_ufunc_inspect_signature_builder; + PyObject *_usefields; PyObject *_var; PyObject *_view_is_safe; PyObject *_void_scalar_to_string; + PyObject *sort; + PyObject *argsort; } npy_runtime_imports_struct; NPY_VISIBILITY_HIDDEN extern npy_runtime_imports_struct npy_runtime_imports; @@ -105,10 +113,23 @@ npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { #endif Py_DECREF(value); } - return 0; + return 0; } NPY_NO_EXPORT int init_import_mutex(void); +/*! \brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. + */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point); + +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_COMMON_NPY_IMPORT_H_ */ diff --git a/numpy/_core/src/common/npy_longdouble.c b/numpy/_core/src/common/npy_longdouble.c index ce80a9ae2bc3..644af776f9a9 100644 --- a/numpy/_core/src/common/npy_longdouble.c +++ b/numpy/_core/src/common/npy_longdouble.c @@ -144,8 +144,8 @@ npy_longdouble_from_PyLong(PyObject *long_obj) { result = NumPyOS_ascii_strtold(cstr, &end); if (errno == ERANGE) { /* strtold returns INFINITY of the correct sign. */ - if (PyErr_Warn(PyExc_RuntimeWarning, - "overflow encountered in conversion from python long") < 0) { + if (PyErr_WarnEx(PyExc_RuntimeWarning, + "overflow encountered in conversion from python long", 1) < 0) { goto fail; } } diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index 769b90215f2b..605833a511b7 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -6,4 +6,37 @@ #define Npy_HashDouble _Py_HashDouble +#ifdef Py_GIL_DISABLED +// Specialized version of critical section locking to safely use +// PySequence_Fast APIs without the GIL. For performance, the argument *to* +// PySequence_Fast() is provided to the macro, not the *result* of +// PySequence_Fast(), which would require an extra test to determine if the +// lock must be acquired. +// +// These are tweaked versions of macros defined in CPython in +// pycore_critical_section.h, originally added in CPython commit baf347d91643. +// They should behave identically to the versions in CPython. Once the +// macros are expanded, the only difference relative to those versions is the +// use of public C API symbols that are equivalent to the ones used in the +// corresponding CPython definitions. 
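+//
+// A usage sketch (hypothetical; `obj` and the item loop are invented for
+// illustration, while the macro pair and PySequence_Fast are as described
+// above): pass the *original* object to the BEGIN macro, then create and
+// consume the fast sequence inside the pair:
+//
+//     NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj)
+//         PyObject *fast = PySequence_Fast(obj, "expected a sequence");
+//         if (fast != NULL) {
+//             for (Py_ssize_t i = 0; i < PySequence_Fast_GET_SIZE(fast); i++) {
+//                 PyObject *item = PySequence_Fast_GET_ITEM(fast, i);
+//                 /* ... use the borrowed reference `item` ... */
+//             }
+//         }
+//         Py_XDECREF(fast);
+//     NPY_END_CRITICAL_SECTION_SEQUENCE_FAST()
+//
+// The macros defined below expand to a PyCriticalSection only on
+// free-threaded builds, and to a plain scope otherwise.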
+#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \ + { \ + PyObject *_orig_seq = (PyObject *)(original); \ + const int _should_lock_cs = \ + PyList_CheckExact(_orig_seq); \ + PyCriticalSection _cs_fast; \ + if (_should_lock_cs) { \ + PyCriticalSection_Begin(&_cs_fast, _orig_seq); \ + } +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() \ + if (_should_lock_cs) { \ + PyCriticalSection_End(&_cs_fast); \ + } \ + } +#else +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } +#endif + + #endif /* NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ */ diff --git a/numpy/_core/src/common/npy_sort.c b/numpy/_core/src/common/npy_sort.c new file mode 100644 index 000000000000..632962e884dd --- /dev/null +++ b/numpy/_core/src/common/npy_sort.c @@ -0,0 +1,67 @@ +#include +#include +#include +#include "npy_sort.h" +#include "dtypemeta.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NPY_NO_EXPORT int +npy_default_sort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc *cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_SortImpl *sort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + sort_func = npy_quicksort_impl; + break; + case NPY_SORT_STABLE: + sort_func = npy_mergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return sort_func(data[0], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +NPY_NO_EXPORT int +npy_default_argsort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc *cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_ArgSortImpl *argsort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + argsort_func = npy_aquicksort_impl; + break; + case NPY_SORT_STABLE: + argsort_func = npy_amergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return argsort_func(data[0], (npy_intp *)data[1], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +#ifdef __cplusplus +} +#endif diff --git a/numpy/_core/src/common/npy_sort.h.src b/numpy/_core/src/common/npy_sort.h.src index d6e4357225a8..95d6f9d1ee70 100644 --- a/numpy/_core/src/common/npy_sort.h.src +++ b/numpy/_core/src/common/npy_sort.h.src @@ -5,6 +5,7 @@ #include #include #include +#include #define NPY_ENOMEM 1 #define NPY_ECOMP 2 @@ -107,6 +108,41 @@ NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *ar NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); +/* + ***************************************************************************** + ** NEW-STYLE GENERIC SORT ** + ***************************************************************************** + */ + +NPY_NO_EXPORT int npy_default_sort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); +NPY_NO_EXPORT int npy_default_argsort_loop(PyArrayMethod_Context 
*context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); + +/* + ***************************************************************************** + ** GENERIC SORT IMPLEMENTATIONS ** + ***************************************************************************** + */ + +typedef int (PyArray_SortImpl)(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +typedef int (PyArray_ArgSortImpl)(void *vv, npy_intp *tosort, npy_intp n, + void *varr, npy_intp elsize, + PyArray_CompareFunc *cmp); + +NPY_NO_EXPORT int npy_quicksort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_mergesort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_aquicksort_impl(void *vv, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_amergesort_impl(void *v, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); + + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 0f1d42a10a3f..90c06a4cae55 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 0f1d42a10a3f594ad48894912396df31b2c2d55d +Subproject commit 90c06a4cae557bdbfa4f231a781d2b5c1a8f6d1c diff --git a/numpy/_core/src/common/raii_utils.hpp b/numpy/_core/src/common/raii_utils.hpp new file mode 100644 index 000000000000..1049e97387f0 --- /dev/null +++ b/numpy/_core/src/common/raii_utils.hpp @@ -0,0 +1,171 @@ +#ifndef NUMPY_CORE_SRC_COMMON_RAII_UTILS_HPP_ +#define NUMPY_CORE_SRC_COMMON_RAII_UTILS_HPP_ + +// +// Utilities for RAII management of resources. +// +// Another (and arguably clearer) name for this resource management pattern +// is "Scope-Bound Resource Management", but RAII is much more common, so we +// use the familiar acronym. +// + +#include + +// For npy_string_allocator, PyArray_StringDTypeObject, NPY_NO_EXPORT: +#include "numpy/ndarraytypes.h" + +// Forward declarations not currently in a header. +// XXX Where should these be moved? +NPY_NO_EXPORT npy_string_allocator * +NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr); +NPY_NO_EXPORT void +NpyString_release_allocator(npy_string_allocator *allocator); + + +namespace np { namespace raii { + +// +// RAII for PyGILState_* API. +// +// In C++ code, use this at the beginning of a scope, e.g. +// +// { +// np::raii::EnsureGIL ensure_gil{}; +// [code that uses the Python C API here] +// } +// +// instead of +// +// PyGILState_STATE gil_state = PyGILState_Ensure(); +// [code that uses the Python C API here] +// PyGILState_Release(gil_state); +// +// or +// NPY_ALLOW_C_API_DEF +// NPY_ALLOW_C_API +// [code that uses the Python C API here] +// NPY_DISABLE_C_API +// +// This ensures that PyGILState_Release(gil_state) is called, even if the +// wrapped code throws an exception or executes a return or a goto. +// +class EnsureGIL +{ + PyGILState_STATE gil_state; + +public: + + EnsureGIL() { + gil_state = PyGILState_Ensure(); + } + + ~EnsureGIL() { + PyGILState_Release(gil_state); + } + + EnsureGIL(const EnsureGIL&) = delete; + EnsureGIL(EnsureGIL&& other) = delete; + EnsureGIL& operator=(const EnsureGIL&) = delete; + EnsureGIL& operator=(EnsureGIL&&) = delete; +}; + + +// +// RAII for Python thread state. 
+// +// In C++ code, use this at the beginning of a scope, e.g. +// +// { +// np::raii::SaveThreadState save_thread_state{}; +// [code...] +// } +// +// instead of +// +// PyThreadState *thread_state = PyEval_SaveThread(); +// [code...] +// PyEval_RestoreThread(thread_state); +// +// or +// Py_BEGIN_ALLOW_THREADS +// [code...] +// Py_END_ALLOW_THREADS +// +// or +// NPY_BEGIN_THREADS_DEF +// NPY_BEGIN_THREADS +// [code...] +// NPY_END_THREADS +// +// This ensures that PyEval_RestoreThread(thread_state) is called, even +// if the wrapped code throws an exception or executes a return or a goto. +// +class SaveThreadState +{ + PyThreadState *thread_state; + +public: + + SaveThreadState() { + thread_state = PyEval_SaveThread(); + } + + ~SaveThreadState() { + PyEval_RestoreThread(thread_state); + } + + SaveThreadState(const SaveThreadState&) = delete; + SaveThreadState(SaveThreadState&& other) = delete; + SaveThreadState& operator=(const SaveThreadState&) = delete; + SaveThreadState& operator=(SaveThreadState&&) = delete; +}; + + +// +// RAII for npy_string_allocator. +// +// Instead of +// +// Py_INCREF(descr); +// npy_string_allocator *allocator = NpyString_acquire_allocator(descr); +// [code that uses allocator] +// NpyString_release_allocator(allocator); +// Py_DECREF(descr); +// +// use +// +// { +// np::raii::NpyStringAcquireAllocator alloc(descr); +// [code that uses alloc.allocator()] +// } +// +class NpyStringAcquireAllocator +{ + PyArray_StringDTypeObject *_descr; + npy_string_allocator *_allocator; + +public: + + NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) : _descr(descr) { + Py_INCREF(_descr); + _allocator = NpyString_acquire_allocator(_descr); + } + + ~NpyStringAcquireAllocator() { + NpyString_release_allocator(_allocator); + Py_DECREF(_descr); + } + + NpyStringAcquireAllocator(const NpyStringAcquireAllocator&) = delete; + NpyStringAcquireAllocator(NpyStringAcquireAllocator&& other) = delete; + NpyStringAcquireAllocator& operator=(const NpyStringAcquireAllocator&) = delete; + NpyStringAcquireAllocator& operator=(NpyStringAcquireAllocator&&) = delete; + + npy_string_allocator *allocator() { + return _allocator; + } +}; + +}} // namespace np { namespace raii { + +#endif diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md new file mode 100644 index 000000000000..a13a0f75b6fc --- /dev/null +++ b/numpy/_core/src/common/simd/README.md @@ -0,0 +1,266 @@ +# NumPy SIMD Wrapper for Highway + +This directory contains a lightweight C++ wrapper over Google's [Highway](https://github.com/google/highway) SIMD library, designed specifically for NumPy's needs. + +> **Note**: This directory also contains the C interface of universal intrinsics (under `simd.h`) which is no longer supported. The Highway wrapper described in this document should be used instead for all new SIMD code. + +## Overview + +The wrapper simplifies Highway's SIMD interface by eliminating class tags and using lane types directly, which can be deduced from arguments in most cases. This design makes the SIMD code more intuitive and easier to maintain while still leveraging Highway generic intrinsics. + +## Architecture + +The wrapper consists of two main headers: + +1. `simd.hpp`: The main header that defines namespaces and includes configuration macros +2. 
`simd.inc.hpp`: Implementation details included by `simd.hpp` multiple times for different namespaces
+
+Additionally, this directory contains legacy C interface files for universal intrinsics (`simd.h` and related files) which are deprecated and should not be used for new code. All new SIMD code should use the Highway wrapper.
+
+
+## Usage
+
+### Basic Usage
+
+```cpp
+#include "simd/simd.hpp"
+
+// Use np::simd for maximum width SIMD operations
+using namespace np::simd;
+float *data = /* ... */;
+Vec<float> v = LoadU(data);
+v = Add(v, v);
+StoreU(v, data);
+
+// Use np::simd128 for fixed 128-bit SIMD operations
+using namespace np::simd128;
+Vec<float> v128 = LoadU(data);
+v128 = Add(v128, v128);
+StoreU(v128, data);
+```
+
+### Checking for SIMD Support
+
+```cpp
+#include "simd/simd.hpp"
+
+// Check if SIMD is enabled
+#if NPY_HWY
+    // SIMD code
+#else
+    // Scalar fallback code
+#endif
+
+// Check for float64 support
+#if NPY_HWY_F64
+    // Use float64 SIMD operations
+#endif
+
+// Check for FMA support
+#if NPY_HWY_FMA
+    // Use FMA operations
+#endif
+```
+
+## Type Support and Constraints
+
+The wrapper provides type constraints to help with SFINAE (Substitution Failure Is Not An Error) and compile-time type checking:
+
+- `kSupportLane<TLane>`: Determines whether the specified lane type is supported by the SIMD extension.
+  ```cpp
+  // Base template - always defined, even when SIMD is not enabled (for SFINAE)
+  template <typename TLane>
+  constexpr bool kSupportLane = NPY_HWY != 0;
+  template <>
+  constexpr bool kSupportLane<double> = NPY_HWY_F64 != 0;
+  ```
+
+- `kMaxLanes<TLane>`: Maximum number of lanes supported by the SIMD extension for the specified lane type.
+  ```cpp
+  template <typename TLane>
+  constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag<TLane>);
+  ```
+
+```cpp
+#include "simd/simd.hpp"
+
+// Check if float64 operations are supported
+if constexpr (np::simd::kSupportLane<double>) {
+    // Use float64 operations
+}
+```
+
+These constraints allow for compile-time checking of which lane types are supported, which can be used in SFINAE contexts to enable or disable functions based on type support.
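+
+As a minimal sketch of that SFINAE use (the function name `multiply_by_two` is illustrative, not part of the wrapper), a kernel can be restricted to supported lane types like this:
+
+```cpp
+#include <cstddef>
+#include <type_traits>
+#include "simd/simd.hpp"
+
+#if NPY_HWY
+// Participates in overload resolution only when TLane is a supported
+// lane type; a scalar fallback overload can cover the remaining types.
+template <typename TLane,
+          typename = std::enable_if_t<np::simd::kSupportLane<TLane>>>
+void multiply_by_two(TLane *data, size_t n)
+{
+    using namespace np::simd;
+    const size_t step = Lanes<TLane>();
+    size_t i = 0;
+    for (; i + step <= n; i += step) {
+        Vec<TLane> v = LoadU(data + i);
+        StoreU(Add(v, v), data + i);  // doubles each lane
+    }
+    for (; i < n; ++i) {
+        data[i] += data[i];  // scalar tail
+    }
+}
+#endif
+```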
+
+## Available Operations
+
+The wrapper provides the following common operations that are used in NumPy (a combined usage sketch follows the list):
+
+- Vector creation operations:
+  - `Zero`: Returns a vector with all lanes set to zero
+  - `Set`: Returns a vector with all lanes set to the given value
+  - `Undefined`: Returns an uninitialized vector
+
+- Memory operations:
+  - `LoadU`: Unaligned load of a vector from memory
+  - `StoreU`: Unaligned store of a vector to memory
+
+- Vector information:
+  - `Lanes`: Returns the number of vector lanes based on the lane type
+
+- Type conversion:
+  - `BitCast`: Reinterprets a vector to a different type without modifying the underlying data
+  - `VecFromMask`: Converts a mask to a vector
+
+- Comparison operations:
+  - `Eq`: Element-wise equality comparison
+  - `Le`: Element-wise less than or equal comparison
+  - `Lt`: Element-wise less than comparison
+  - `Gt`: Element-wise greater than comparison
+  - `Ge`: Element-wise greater than or equal comparison
+
+- Arithmetic operations:
+  - `Add`: Element-wise addition
+  - `Sub`: Element-wise subtraction
+  - `Mul`: Element-wise multiplication
+  - `Div`: Element-wise division
+  - `Min`: Element-wise minimum
+  - `Max`: Element-wise maximum
+  - `Abs`: Element-wise absolute value
+  - `Sqrt`: Element-wise square root
+
+- Logical operations:
+  - `And`: Bitwise AND
+  - `Or`: Bitwise OR
+  - `Xor`: Bitwise XOR
+  - `AndNot`: Bitwise AND NOT (a & ~b)
+
+Additional Highway operations can be accessed via the `hn` namespace alias inside the `simd` or `simd128` namespaces.
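+
+A small sketch combining several of the operations above (the kernel name is an illustrative assumption, not a wrapper API):
+
+```cpp
+#include "simd/simd.hpp"
+
+#if NPY_HWY
+// Computes one full-width block of data[i] = min(max(data[i] * scale + bias, lo), hi),
+// using only the creation, memory, arithmetic, and min/max operations listed above.
+void scale_bias_clamp_block(float *data, float scale, float bias, float lo, float hi)
+{
+    using namespace np::simd;
+    Vec<float> v = LoadU(data);
+    v = Add(Mul(v, Set(scale)), Set(bias));  // scale and shift
+    v = Min(Max(v, Set(lo)), Set(hi));       // clamp to [lo, hi]
+    StoreU(v, data);
+}
+#endif
+```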
+
+## Extending
+
+To add more operations from Highway:
+
+1. Import them in the `simd.inc.hpp` file using the `using` directive if they don't require a tag:
+   ```cpp
+   // For operations that don't require a tag
+   using hn::FunctionName;
+   ```
+
+2. Define wrapper functions for intrinsics that require a class tag:
+   ```cpp
+   // For operations that require a tag
+   template <typename TLane, typename... Args>
+   HWY_API ReturnType FunctionName(Args... args) {
+       return hn::FunctionName(_Tag<TLane>(), args...);
+   }
+   ```
+
+3. Add appropriate documentation and SFINAE constraints if needed
+
+
+## Build Configuration
+
+The SIMD wrapper automatically disables SIMD operations when optimizations are disabled:
+
+- When `NPY_DISABLE_OPTIMIZATION` is defined, SIMD operations are disabled
+- SIMD is enabled only when the Highway target is not scalar (`HWY_TARGET != HWY_SCALAR`)
+  and not EMU128 (`HWY_TARGET != HWY_EMU128`)
+
+## Design Notes
+
+1. **Why avoid Highway scalar operations?**
+   - NumPy already provides kernels for scalar operations
+   - Compilers can better optimize standard library implementations
+   - Not all Highway intrinsics are fully supported in scalar mode
+   - For strict IEEE 754 floating-point compliance requirements, direct scalar
+     implementations offer more predictable behavior than EMU128
+
+2. **Legacy Universal Intrinsics**
+   - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated
+   - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_HWY` macros)
+   - The legacy code is maintained for compatibility but will eventually be removed
+
+3. **Feature Detection Constants vs. Highway Constants**
+   - NumPy-specific constants (`NPY_HWY_F16`, `NPY_HWY_F64`, `NPY_HWY_FMA`) provide additional safety beyond raw Highway constants
+   - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check platform capabilities but don't consider NumPy's build configuration
+   - Our constants combine both checks:
+     ```cpp
+     #define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16)
+     ```
+   - This ensures SIMD features won't be used when:
+     - Platform supports it but NumPy optimization is disabled via meson option:
+       ```
+       option('disable-optimization', type: 'boolean', value: false,
+              description: 'Disable CPU optimized code (dispatch,simd,unroll...)')
+       ```
+     - Highway target is scalar (`HWY_TARGET == HWY_SCALAR`)
+   - Using these constants ensures consistent behavior across different compilation settings
+   - Without this additional layer, code might incorrectly try to use SIMD paths in scalar mode
+
+4. **Namespace Design**
+   - `np::simd`: Maximum width SIMD operations (scalable)
+   - `np::simd128`: Fixed 128-bit SIMD operations
+   - `hn`: Highway namespace alias (available within the SIMD namespaces)
+
+5. **Why Namespaces and Why Not Just Use Highway Directly?**
+   - Highway's design uses class tag types as template parameters (e.g., `Vec<ScalableTag<float>>`) when defining vector types
+   - Many Highway functions require explicitly passing a tag instance as the first parameter
+   - This class tag-based approach increases verbosity and complexity in user code
+   - Our wrapper eliminates this by internally managing tags through namespaces, letting users directly use types e.g. `Vec<float>`
+   - Simple example with raw Highway:
+     ```cpp
+     // Highway's approach
+     float *data = /* ... */;
+
+     namespace hn = hwy::HWY_NAMESPACE;
+     using namespace hn;
+
+     // Full-width operations
+     ScalableTag<float> df;                  // Create a tag instance
+     Vec<decltype(df)> v = LoadU(df, data);  // LoadU requires a tag instance
+     StoreU(v, df, data);                    // StoreU requires a tag instance
+
+     // 128-bit operations
+     Full128<float> df128;                            // Create a 128-bit tag instance
+     Vec<decltype(df128)> v128 = LoadU(df128, data);  // LoadU requires a tag instance
+     StoreU(v128, df128, data);                       // StoreU requires a tag instance
+     ```
+
+   - Simple example with our wrapper:
+     ```cpp
+     // Our wrapper approach
+     float *data = /* ... */;
+
+     // Full-width operations
+     using namespace np::simd;
+     Vec<float> v = LoadU(data);     // Full-width vector load
+     StoreU(v, data);
+
+     // 128-bit operations
+     using namespace np::simd128;
+     Vec<float> v128 = LoadU(data);  // 128-bit vector load
+     StoreU(v128, data);
+     ```
+
+   - The namespaced approach simplifies code, reduces errors, and provides a more intuitive interface
+   - It preserves all Highway operations benefits while reducing cognitive overhead
+
+6. **Why Namespaces Are Essential for This Design?**
+   - Namespaces allow us to define different internal tag types (`hn::ScalableTag<TLane>` in `np::simd` vs `hn::Full128<TLane>` in `np::simd128`)
+   - This provides a consistent type-based interface (`Vec<TLane>`) without requiring users to manually create tags
+   - Enables using the same function names (like `LoadU`) with different implementations based on SIMD width
+   - Without namespaces, we'd have to either reintroduce tags (defeating the purpose of the wrapper) or create different function names for each variant (e.g., `LoadU` vs `LoadU128`)
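+
+A concrete way to see note 6 in action (a minimal sketch; the variable names are ours): the identical spelling `Lanes<float>()` resolves against a different internal tag type in each namespace:
+
+```cpp
+#include <cstddef>
+#include "simd/simd.hpp"
+
+#if NPY_HWY
+// With AVX2, full_lanes is 8 while fixed_lanes is 4; on a 128-bit
+// target both are 4. No tag objects appear at the call sites.
+const size_t full_lanes  = np::simd::Lanes<float>();
+const size_t fixed_lanes = np::simd128::Lanes<float>();
+#endif
+```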
+
+7. **Template Type Parameters**
+   - `TLane`: The scalar type for each vector lane (e.g., uint8_t, float, double)
+
+
+## Requirements
+
+- C++17 or later
+- Google Highway library
+
+## License
+
+Same as NumPy's license
diff --git a/numpy/_core/src/common/simd/avx2/arithmetic.h b/numpy/_core/src/common/simd/avx2/arithmetic.h
index 58d842a6d3a4..15b9be85dc51 100644
--- a/numpy/_core/src/common/simd/avx2/arithmetic.h
+++ b/numpy/_core/src/common/simd/avx2/arithmetic.h
@@ -215,9 +215,9 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor)
     // q = (a + mulhi) >> sh
     __m256i q = _mm256_add_epi64(a, mulhi);
     // emulate arithmetic right shift
-    const __m256i sigb = npyv_setall_s64(1LL << 63);
-    q = _mm256_srl_epi64(_mm256_add_epi64(q, sigb), shf1);
-    q = _mm256_sub_epi64(q, _mm256_srl_epi64(sigb, shf1));
+    const __m256i sbit = npyv_setall_s64(0x8000000000000000);
+    q = _mm256_srl_epi64(_mm256_add_epi64(q, sbit), shf1);
+    q = _mm256_sub_epi64(q, _mm256_srl_epi64(sbit, shf1));
     // q = q - XSIGN(a)
     // trunc(a/d) = (q ^ dsign) - dsign
     q = _mm256_sub_epi64(q, asign);
diff --git a/numpy/_core/src/common/simd/simd.h b/numpy/_core/src/common/simd/simd.h
index fe4ca4da92f5..fd1be6e0c867 100644
--- a/numpy/_core/src/common/simd/simd.h
+++ b/numpy/_core/src/common/simd/simd.h
@@ -23,7 +23,7 @@ extern "C" {
 /*
  * clang commit an aggressive optimization behaviour when flag `-ftrapping-math`
  * isn't fully supported that's present at -O1 or greater. When partially loading a
- * vector register for a operations that requires to fill up the remaining lanes
+ * vector register for an operation that requires filling up the remaining lanes
  * with certain value for example divide operation needs to fill the remaining value
 * with non-zero integer to avoid fp exception divide-by-zero.
 * clang optimizer notices that the entire register is not needed for the store
diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp
new file mode 100644
index 000000000000..40556a68c59d
--- /dev/null
+++ b/numpy/_core/src/common/simd/simd.hpp
@@ -0,0 +1,86 @@
+#ifndef NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_
+#define NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_
+
+/**
+ * This header provides a thin wrapper over Google's Highway SIMD library.
+ *
+ * The wrapper aims to simplify the SIMD interface of Google's Highway by
+ * getting rid of its class tags and using lane types directly, which can be
+ * deduced from the arguments in most cases.
+ */
+/**
+ * Since `NPY_SIMD` is limited to the NumPy C universal intrinsics,
+ * `NPY_HWY` is defined to indicate the SIMD availability for Google's Highway
+ * C++ code.
+ *
+ * Highway SIMD is only available when optimization is enabled.
+ * When NPY_DISABLE_OPTIMIZATION is defined, SIMD operations are disabled
+ * and the code falls back to scalar implementations.
+ */
+#ifndef NPY_DISABLE_OPTIMIZATION
+#include <hwy/highway.h>
+
+/**
+ * We avoid using Highway scalar operations for the following reasons:
+ *
+ * 1. NumPy already provides optimized kernels for scalar operations. Using these
+ *    existing implementations is more consistent with NumPy's architecture and
+ *    allows for compiler optimizations specific to standard library calls.
+ *
+ * 2. Not all Highway intrinsics are fully supported in scalar mode, which could
+ *    lead to compilation errors or unexpected behavior for certain operations.
+ *
+ * 3. For NumPy's strict IEEE 754 floating-point compliance requirements, direct scalar
+ *    implementations offer more predictable behavior than EMU128.
+ *
+ * Therefore, we only enable Highway SIMD when targeting actual SIMD instruction sets.
+ */
+#define NPY_HWY ((HWY_TARGET != HWY_SCALAR) && (HWY_TARGET != HWY_EMU128))
+
+// Indicates if the SIMD operations are available for float16.
+#define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16)
+// Note: Highway requires SIMD extensions with native float32 support, so we don't need
+// to check for it.
+
+// Indicates if the SIMD operations are available for float64.
+#define NPY_HWY_F64 (NPY_HWY && HWY_HAVE_FLOAT64)
+
+// Indicates whether the SIMD floating-point operations natively support FMA.
+#define NPY_HWY_FMA (NPY_HWY && HWY_NATIVE_FMA)
+
+#else
+#define NPY_HWY 0
+#define NPY_HWY_F16 0
+#define NPY_HWY_F64 0
+#define NPY_HWY_FMA 0
+#endif
+
+namespace np {
+
+/// Represents the max SIMD width supported by the platform.
+namespace simd {
+#if NPY_HWY
+/// The highway namespace alias.
+/// We cannot import all the symbols from the HWY_NAMESPACE because it will
+/// conflict with the existing symbols in the numpy namespace.
+namespace hn = hwy::HWY_NAMESPACE;
+// internally used by the template header
+template <typename TLane>
+using _Tag = hn::ScalableTag<TLane>;
+#endif
+#include "simd.inc.hpp"
+} // namespace simd
+
+/// Represents the 128-bit SIMD width.
+namespace simd128 {
+#if NPY_HWY
+namespace hn = hwy::HWY_NAMESPACE;
+template <typename TLane>
+using _Tag = hn::Full128<TLane>;
+#endif
+#include "simd.inc.hpp"
+} // namespace simd128
+
+} // namespace np
+
+#endif // NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_
diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp
new file mode 100644
index 000000000000..f4a2540927dd
--- /dev/null
+++ b/numpy/_core/src/common/simd/simd.inc.hpp
@@ -0,0 +1,132 @@
+#ifndef NPY_HWY
+#error "This is not a standalone header. Include simd.hpp instead."
+#define NPY_HWY 1 // Prevent editors from graying out the happy branch
+#endif
+
+// Using anonymous namespace instead of inline to ensure each translation unit
+// gets its own copy of constants based on local compilation flags
+namespace {
+
+// NOTE: This file is included by simd.hpp multiple times with different namespaces
+// so avoid including any headers here
+
+/**
+ * Determines whether the specified lane type is supported by the SIMD extension.
+ * Always defined as false when SIMD is not enabled, so it can be used in SFINAE.
+ *
+ * @tparam TLane The lane type to check for support.
+ */
+template <typename TLane>
+constexpr bool kSupportLane = NPY_HWY != 0;
+
+#if NPY_HWY
+// Define lane type support based on Highway capabilities
+template <>
+constexpr bool kSupportLane<hwy::float16_t> = HWY_HAVE_FLOAT16 != 0;
+template <>
+constexpr bool kSupportLane<double> = HWY_HAVE_FLOAT64 != 0;
+template <>
+constexpr bool kSupportLane<long double> =
+        HWY_HAVE_FLOAT64 != 0 && sizeof(long double) == sizeof(double);
+
+/// Maximum number of lanes supported by the SIMD extension for the specified lane type.
+template <typename TLane>
+constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag<TLane>);
+
+/// Represents an N-lane vector based on the specified lane type.
+/// @tparam TLane The scalar type for each vector lane
+template <typename TLane>
+using Vec = hn::Vec<_Tag<TLane>>;
+
+/// Represents a mask vector with boolean values or as a bitmask.
+/// @tparam TLane The scalar type the mask corresponds to
+template <typename TLane>
+using Mask = hn::Mask<_Tag<TLane>>;
+
+/// Unaligned load of a vector from memory.
+template <typename TLane>
+HWY_API Vec<TLane>
+LoadU(const TLane *ptr)
+{
+    return hn::LoadU(_Tag<TLane>(), ptr);
+}
+
+/// Unaligned store of a vector to memory.
+template <typename TLane>
+HWY_API void
+StoreU(const Vec<TLane> &a, TLane *ptr)
+{
+    hn::StoreU(a, _Tag<TLane>(), ptr);
+}
+
+/// Returns the number of vector lanes based on the lane type.
+template <typename TLane>
+HWY_API HWY_LANES_CONSTEXPR size_t
+Lanes(TLane tag = 0)
+{
+    return hn::Lanes(_Tag<TLane>());
+}
+
+/// Returns an uninitialized N-lane vector.
+template <typename TLane>
+HWY_API Vec<TLane>
+Undefined(TLane tag = 0)
+{
+    return hn::Undefined(_Tag<TLane>());
+}
+
+/// Returns N-lane vector with all lanes equal to zero.
+template <typename TLane>
+HWY_API Vec<TLane>
+Zero(TLane tag = 0)
+{
+    return hn::Zero(_Tag<TLane>());
+}
+
+/// Returns N-lane vector with all lanes equal to the given value of type `TLane`.
+template <typename TLane>
+HWY_API Vec<TLane>
+Set(TLane val)
+{
+    return hn::Set(_Tag<TLane>(), val);
+}
+
+/// Converts a mask to a vector based on the specified lane type.
+template <typename TLane, typename TMask>
+HWY_API Vec<TLane>
+VecFromMask(const TMask &m)
+{
+    return hn::VecFromMask(_Tag<TLane>(), m);
+}
+
+/// Convert (Reinterpret) an N-lane vector to a different type without modifying the
+/// underlying data.
+template <typename TLane, typename TVec>
+HWY_API Vec<TLane>
+BitCast(const TVec &v)
+{
+    return hn::BitCast(_Tag<TLane>(), v);
+}
+
+// Import common Highway intrinsics
+using hn::Abs;
+using hn::Add;
+using hn::And;
+using hn::AndNot;
+using hn::Div;
+using hn::Eq;
+using hn::Ge;
+using hn::Gt;
+using hn::Le;
+using hn::Lt;
+using hn::Max;
+using hn::Min;
+using hn::Mul;
+using hn::Or;
+using hn::Sqrt;
+using hn::Sub;
+using hn::Xor;
+
+#endif // NPY_HWY
+
+} // namespace
diff --git a/numpy/_core/src/common/simd/sse/arithmetic.h b/numpy/_core/src/common/simd/sse/arithmetic.h
index 357b136d25cd..b50942ab75ad 100644
--- a/numpy/_core/src/common/simd/sse/arithmetic.h
+++ b/numpy/_core/src/common/simd/sse/arithmetic.h
@@ -251,9 +251,9 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor)
     // q = (a + mulhi) >> sh
     __m128i q = _mm_add_epi64(a, mulhi);
     // emulate arithmetic right shift
-    const __m128i sigb = npyv_setall_s64(1LL << 63);
-    q = _mm_srl_epi64(_mm_add_epi64(q, sigb), divisor.val[1]);
-    q = _mm_sub_epi64(q, _mm_srl_epi64(sigb, divisor.val[1]));
+    const __m128i sbit = npyv_setall_s64(0x8000000000000000);
+    q = _mm_srl_epi64(_mm_add_epi64(q, sbit), divisor.val[1]);
+    q = _mm_sub_epi64(q, _mm_srl_epi64(sbit, divisor.val[1]));
     // q = q - XSIGN(a)
     // trunc(a/d) = (q ^ dsign) - dsign
     q = _mm_sub_epi64(q, asign);
diff --git a/numpy/_core/src/common/simd/vec/utils.h b/numpy/_core/src/common/simd/vec/utils.h
index f8b28cfebd8c..7e4a7b8de8fa 100644
--- a/numpy/_core/src/common/simd/vec/utils.h
+++ b/numpy/_core/src/common/simd/vec/utils.h
@@ -25,14 +25,16 @@
     #ifndef vec_neg
         #define vec_neg(a) (-(a))
     #endif
-    #ifndef vec_and
-        #define vec_and(a, b) ((a) & (b)) // Vector AND
-    #endif
-    #ifndef vec_or
-        #define vec_or(a, b) ((a) | (b)) // Vector OR
-    #endif
-    #ifndef vec_xor
-        #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR
+    #if !(defined(__clang__) && __VEC__ >= 10305)
+        #ifndef vec_and
+            #define vec_and(a, b) ((a) & (b)) // Vector AND
+        #endif
+        #ifndef vec_or
+            #define vec_or(a, b) ((a) | (b)) // Vector OR
+        #endif
+        #ifndef vec_xor
+            #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR
+        #endif
     #endif
     #ifndef vec_sl
         #define vec_sl(a, b) ((a) << (b)) // Vector Shift Left
diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c
index e98315f14a94..0bcbea5baa30 100644
--- a/numpy/_core/src/common/ufunc_override.c
+++ b/numpy/_core/src/common/ufunc_override.c
@@ -108,7 +108,7 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject *
      * PySequence_Fast* functions. This is required for PyPy
     */
    PyObject *seq;
-    seq = PySequence_Fast(*out_kwd_obj,
+    seq = PySequence_Fast(*out_kwd_obj, // noqa: borrowed-ref OK
                          "Could not convert object to sequence");
    if (seq == NULL) {
        Py_CLEAR(*out_kwd_obj);
diff --git a/numpy/_core/src/dummymodule.c b/numpy/_core/src/dummymodule.c
index 2f293d6c4cd6..e1ef80ab3af3 100644
--- a/numpy/_core/src/dummymodule.c
+++ b/numpy/_core/src/dummymodule.c
@@ -14,25 +14,27 @@ static struct PyMethodDef methods[] = {
     {NULL, NULL, 0, NULL}
 };
 
+static struct PyModuleDef_Slot dummy_slots[] = {
+#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+
+    // signal that this module can be imported in isolated subinterpreters
+    {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
+#endif
+#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+
+    // signal that this module supports running without an active GIL
+    {Py_mod_gil, Py_MOD_GIL_NOT_USED},
+#endif
+    {0, NULL},
+};
 
 static struct PyModuleDef moduledef = {
-    PyModuleDef_HEAD_INIT,
-    "dummy",
-    NULL,
-    -1,
-    methods,
-    NULL,
-    NULL,
-    NULL,
-    NULL
+    .m_base = PyModuleDef_HEAD_INIT,
+    .m_name = "dummy",
+    .m_size = 0,
+    .m_methods = methods,
+    .m_slots = dummy_slots,
 };
 
 /* Initialization function for the module */
 PyMODINIT_FUNC PyInit__dummy(void) {
-    PyObject *m;
-    m = PyModule_Create(&moduledef);
-    if (!m) {
-        return NULL;
-    }
-    return m;
+    return PyModuleDef_Init(&moduledef);
 }
diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway
index 0b696633f9ad..ee36c8371293 160000
--- a/numpy/_core/src/highway
+++ b/numpy/_core/src/highway
@@ -1 +1 @@
-Subproject commit 0b696633f9ad89497dd5532b55eaa01625ad71ca
+Subproject commit ee36c837129310be19c17c9108c6dc3f6ae06942
diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h
index dd25e1ffd6cc..5261e8232a08 100644
--- a/numpy/_core/src/multiarray/_datetime.h
+++ b/numpy/_core/src/multiarray/_datetime.h
@@ -174,8 +174,8 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
                                                     npy_bool from_pickle);
 
 /*
- * Gets a tzoffset in minutes by calling the fromutc() function on
- * the Python datetime.tzinfo object.
+ * Gets a tzoffset in minutes by calling the astimezone() function on
+ * the Python datetime.datetime object.
 */
 NPY_NO_EXPORT int
 get_tzoffset_from_pytzinfo(PyObject *timezone, npy_datetimestruct *dts);
@@ -242,9 +242,10 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
 /*
  * Converts a datetime into a PyObject *.
 *
- * For days or coarser, returns a datetime.date.
- * For microseconds or coarser, returns a datetime.datetime.
- * For units finer than microseconds, returns an integer.
+ * NaT (Not-a-time) is returned as None.
+ * For D/W/Y/M (days or coarser), returns a datetime.date.
+ * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.datetime.
+ * For ns/ps/fs/as (units shorter than microseconds), returns an integer.
 */
 NPY_NO_EXPORT PyObject *
 convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta);
@@ -252,9 +253,9 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta);
 /*
  * Converts a timedelta into a PyObject *.
 *
- * Not-a-time is returned as the string "NaT".
- * For microseconds or coarser, returns a datetime.timedelta.
- * For units finer than microseconds, returns an integer.
+ * NaT (Not-a-time) is returned as None.
+ * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.timedelta.
+ * For Y/M (non-linear units), generic units, and ns/ps/fs/as (units shorter than microseconds), returns an integer.
 */
 NPY_NO_EXPORT PyObject *
 convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta);
diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src
index fc73a64b19a0..b79908e1d5e4 100644
--- a/numpy/_core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src
@@ -7,6 +7,7 @@
 #include "numpy/arrayscalars.h"
 #include "numpy/npy_math.h"
 #include "numpy/halffloat.h"
+#include "npy_import.h"
 #include "common.h"
 #include "npy_argparse.h"
 #include "mem_overlap.h"
@@ -644,7 +645,7 @@ incref_elide_l(PyObject *dummy, PyObject *args)
     }
     /* get item without increasing refcount, item may still be on the python
      * stack but above the inaccessible top */
-    r = PyList_GetItem(arg, 4);
+    r = PyList_GetItem(arg, 4); // noqa: borrowed-ref OK
     res = PyNumber_Add(r, r);
 
     return res;
@@ -863,7 +864,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args))
     if (classes == NULL) {
         goto fail;
     }
-    Py_SETREF(classes, PySequence_Fast(classes, NULL));
+    Py_SETREF(classes, PySequence_Fast(classes, NULL)); // noqa: borrowed-ref OK
     if (classes == NULL) {
         goto fail;
     }
@@ -883,7 +884,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args))
         PyObject *to_dtype, *cast_obj;
         Py_ssize_t pos = 0;
 
-        while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls,
+        while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls, // noqa: borrowed-ref OK
                            &pos, &to_dtype, &cast_obj)) {
             if (cast_obj == Py_None) {
                 continue;
@@ -965,7 +966,7 @@ identityhash_tester(PyObject *NPY_UNUSED(mod),
     }
 
     /* Replace the sequence with a guaranteed fast-sequence */
-    sequence = PySequence_Fast(sequence, "converting sequence.");
+    sequence = PySequence_Fast(sequence, "converting sequence."); // noqa: borrowed-ref OK
     if (sequence == NULL) {
         goto finish;
     }
@@ -2089,10 +2090,18 @@ run_sortkind_converter(PyObject* NPY_UNUSED(self), PyObject *args)
         return NULL;
     }
     switch (kind) {
-        case _NPY_SORT_UNDEFINED: return PyUnicode_FromString("_NPY_SORT_UNDEFINED");
-        case NPY_QUICKSORT: return PyUnicode_FromString("NPY_QUICKSORT");
-        case NPY_HEAPSORT: return PyUnicode_FromString("NPY_HEAPSORT");
-        case NPY_STABLESORT: return PyUnicode_FromString("NPY_STABLESORT");
+        case _NPY_SORT_UNDEFINED:
+            return PyUnicode_FromString("_NPY_SORT_UNDEFINED");
+        case NPY_QUICKSORT:
+            return PyUnicode_FromString("NPY_QUICKSORT");
+        case NPY_HEAPSORT:
+            return PyUnicode_FromString("NPY_HEAPSORT");
+        case NPY_STABLESORT:
+            return PyUnicode_FromString("NPY_STABLESORT");
+        default:
+            // the other possible values in NPY_SORTKIND can only
+            // be set with keywords.
+ break; } return PyLong_FromLong(kind); } @@ -2162,12 +2171,17 @@ run_casting_converter(PyObject* NPY_UNUSED(self), PyObject *args) if (!PyArg_ParseTuple(args, "O&", PyArray_CastingConverter, &casting)) { return NULL; } - switch (casting) { + switch ((int)casting) { case NPY_NO_CASTING: return PyUnicode_FromString("NPY_NO_CASTING"); case NPY_EQUIV_CASTING: return PyUnicode_FromString("NPY_EQUIV_CASTING"); case NPY_SAFE_CASTING: return PyUnicode_FromString("NPY_SAFE_CASTING"); case NPY_SAME_KIND_CASTING: return PyUnicode_FromString("NPY_SAME_KIND_CASTING"); case NPY_UNSAFE_CASTING: return PyUnicode_FromString("NPY_UNSAFE_CASTING"); + case NPY_SAME_VALUE_CASTING: return PyUnicode_FromString("NPY_SAME_VALUE_CASTING"); + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); default: return PyLong_FromLong(casting); } } @@ -2225,6 +2239,15 @@ run_scalar_intp_from_sequence(PyObject *NPY_UNUSED(self), PyObject *obj) return PyArray_IntTupleFromIntp(1, vals); } +static PyObject * +_npy_import_entry_point(PyObject *NPY_UNUSED(self), PyObject *obj) { + PyObject *res = PyUnicode_AsASCIIString(obj); + if (res != NULL) { + Py_SETREF(res, npy_import_entry_point(PyBytes_AS_STRING(res))); + } + return res; +} + static PyMethodDef Multiarray_TestsMethods[] = { {"argparse_example_function", (PyCFunction)argparse_example_function, @@ -2409,45 +2432,63 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"run_intp_converter", run_intp_converter, METH_VARARGS, NULL}, + {"npy_import_entry_point", + _npy_import_entry_point, + METH_O, NULL}, {NULL, NULL, 0, NULL} /* Sentinel */ }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_tests", - NULL, - -1, - Multiarray_TestsMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__multiarray_tests(void) +static int +_multiarray_tests_exec(PyObject *m) { - PyObject *m; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return m; + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); if (init_argparse_mutex() < 0) { - return NULL; + return -1; } if (PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "cannot load _multiarray_tests module."); } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_tests_slots[] = { + {Py_mod_exec, _multiarray_tests_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef 
moduledef = {
+    .m_base = PyModuleDef_HEAD_INIT,
+    .m_name = "_multiarray_tests",
+    .m_size = 0,
+    .m_methods = Multiarray_TestsMethods,
+    .m_slots = _multiarray_tests_slots,
+};
 
-    return m;
+PyMODINIT_FUNC PyInit__multiarray_tests(void)
+{
+    return PyModuleDef_Init(&moduledef);
 }
 
 NPY_NO_EXPORT int
diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c
index 280ca81a35a7..8061feed24e5 100644
--- a/numpy/_core/src/multiarray/alloc.c
+++ b/numpy/_core/src/multiarray/alloc.c
@@ -27,9 +27,24 @@
 #endif
 #endif
 
-#define NBUCKETS 1024 /* number of buckets for data*/
-#define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */
-#define NCACHE 7 /* number of cache entries per bucket */
+/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN
+ * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN
+ * use-of-uninitialized-memory warnings less useful. */
+#ifdef Py_GIL_DISABLED
+# define USE_ALLOC_CACHE 0
+#elif defined(__has_feature)
+# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer)
+# define USE_ALLOC_CACHE 0
+# endif
+#endif
+#ifndef USE_ALLOC_CACHE
+# define USE_ALLOC_CACHE 1
+#endif
+
+
+# define NBUCKETS 1024 /* number of buckets for data*/
+# define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */
+# define NCACHE 7 /* number of cache entries per bucket */
 /* this structure fits neatly into a cacheline */
 typedef struct {
     npy_uintp available; /* number of cached pointers */
@@ -38,7 +53,6 @@ typedef struct {
 static cache_bucket datacache[NBUCKETS];
 static cache_bucket dimcache[NBUCKETS_DIM];
-
 /*
 * This function tells whether NumPy attempts to call `madvise` with
 * `MADV_HUGEPAGE`. `madvise` is only ever used on linux, so the value
@@ -99,20 +113,6 @@ indicate_hugepages(void *p, size_t size) {
 }
 
-/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN
- * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN
- * use-of-uninitialized-memory warnings less useful. */
-#ifdef Py_GIL_DISABLED
-#define USE_ALLOC_CACHE 0
-#elif defined(__has_feature)
-# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer)
-# define USE_ALLOC_CACHE 0
-# endif
-#else
-#define USE_ALLOC_CACHE 1
-#endif
-
-
 /* as the cache is managed in global variables verify the GIL is held */
 
 /*
diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h
index f5600c99aaa5..bef6407a28a3 100644
--- a/numpy/_core/src/multiarray/alloc.h
+++ b/numpy/_core/src/multiarray/alloc.h
@@ -75,7 +75,7 @@ _npy_init_workspace(
 
 /*
 * Helper definition macro for a small work/scratchspace.
- * The `NAME` is the C array to to be defined of with the type `TYPE`.
+ * The `NAME` is the C array to be defined with the type `TYPE`.
 *
 * The usage pattern for this is:
 *
@@ -93,11 +93,16 @@ _npy_init_workspace(
 * With some caches, it may be possible to malloc/calloc very quickly in which
 * case we should not hesitate to replace this pattern.
*/ -#define NPY_ALLOC_WORKSPACE(NAME, TYPE, fixed_size, size) \ +#define NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ TYPE NAME##_static[fixed_size]; \ - TYPE *NAME; \ + TYPE *NAME; +#define NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) \ _npy_init_workspace((void **)&NAME, NAME##_static, (fixed_size), sizeof(TYPE), (size)) +#define NPY_ALLOC_WORKSPACE(NAME, TYPE, fixed_size, size) \ + NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ + NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) + static inline void _npy_free_workspace(void *buf, void *static_buf) diff --git a/numpy/_core/src/multiarray/array_assign_array.c b/numpy/_core/src/multiarray/array_assign_array.c index 8886d1cacb40..306ed07b0ace 100644 --- a/numpy/_core/src/multiarray/array_assign_array.c +++ b/numpy/_core/src/multiarray/array_assign_array.c @@ -29,6 +29,8 @@ #include "umathmodule.h" +#define NPY_ALIGNED_CASTING_FLAG 1 + /* * Check that array data is both uint-aligned and true-aligned for all array * elements, as required by the copy/casting code in lowlevel_strided_loops.c @@ -79,7 +81,8 @@ copycast_isaligned(int ndim, npy_intp const *shape, NPY_NO_EXPORT int raw_array_assign_array(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides) + PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides, + int flags) { int idim; npy_intp shape_it[NPY_MAXDIMS]; @@ -87,14 +90,11 @@ raw_array_assign_array(int ndim, npy_intp const *shape, npy_intp src_strides_it[NPY_MAXDIMS]; npy_intp coord[NPY_MAXDIMS]; - int aligned; + int aligned = (flags & NPY_ALIGNED_CASTING_FLAG) != 0; + int same_value_cast = (flags & NPY_SAME_VALUE_CASTING_FLAG) != 0; NPY_BEGIN_THREADS_DEF; - aligned = - copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) && - copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides); - /* Use raw iteration with no heap allocation */ if (PyArray_PrepareTwoRawArrayIter( ndim, shape, @@ -120,21 +120,25 @@ raw_array_assign_array(int ndim, npy_intp const *shape, /* Get the function to do the casting */ NPY_cast_info cast_info; - NPY_ARRAYMETHOD_FLAGS flags; + NPY_ARRAYMETHOD_FLAGS method_flags; if (PyArray_GetDTypeTransferFunction(aligned, src_strides_it[0], dst_strides_it[0], src_dtype, dst_dtype, 0, - &cast_info, &flags) != NPY_SUCCEED) { + &cast_info, &method_flags) != NPY_SUCCEED) { return -1; } - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier((char*)&src_data); } + if (same_value_cast) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } + /* Ensure number of elements exceeds threshold for threading */ - if (!(flags & NPY_METH_REQUIRES_PYAPI)) { + if (!(method_flags & NPY_METH_REQUIRES_PYAPI)) { npy_intp nitems = 1, i; for (i = 0; i < ndim; i++) { nitems *= shape_it[i]; @@ -144,11 +148,14 @@ raw_array_assign_array(int ndim, npy_intp const *shape, npy_intp strides[2] = {src_strides_it[0], dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (cast_info.func(&cast_info.context, - args, &shape_it[0], strides, cast_info.auxdata) < 0) { + result = cast_info.func(&cast_info.context, + args, &shape_it[0], strides, + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it, @@ -158,7 +165,7 @@ 
raw_array_assign_array(int ndim, npy_intp const *shape, NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { int fpes = npy_get_floatstatus_barrier((char*)&src_data); if (fpes && PyUFunc_GiveFloatingpointErrors("cast", fpes) < 0) { return -1; @@ -183,7 +190,7 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides) + npy_intp const *wheremask_strides, int flags) { int idim; npy_intp shape_it[NPY_MAXDIMS]; @@ -192,14 +199,11 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, npy_intp wheremask_strides_it[NPY_MAXDIMS]; npy_intp coord[NPY_MAXDIMS]; - int aligned; + int aligned = (flags & NPY_ALIGNED_CASTING_FLAG) != 0; + int same_value_cast = (flags & NPY_SAME_VALUE_CASTING_FLAG) != 0; NPY_BEGIN_THREADS_DEF; - aligned = - copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) && - copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides); - /* Use raw iteration with no heap allocation */ if (PyArray_PrepareThreeRawArrayIter( ndim, shape, @@ -229,39 +233,45 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, /* Get the function to do the casting */ NPY_cast_info cast_info; - NPY_ARRAYMETHOD_FLAGS flags; + NPY_ARRAYMETHOD_FLAGS method_flags; if (PyArray_GetMaskedDTypeTransferFunction(aligned, src_strides_it[0], dst_strides_it[0], wheremask_strides_it[0], src_dtype, dst_dtype, wheremask_dtype, 0, - &cast_info, &flags) != NPY_SUCCEED) { + &cast_info, &method_flags) != NPY_SUCCEED) { return -1; } + if (same_value_cast) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier(src_data); } - if (!(flags & NPY_METH_REQUIRES_PYAPI)) { + if (!(method_flags & NPY_METH_REQUIRES_PYAPI)) { npy_intp nitems = 1, i; for (i = 0; i < ndim; i++) { nitems *= shape_it[i]; } NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + npy_intp strides[2] = {src_strides_it[0], dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { PyArray_MaskedStridedUnaryOp *stransfer; stransfer = (PyArray_MaskedStridedUnaryOp *)cast_info.func; /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (stransfer(&cast_info.context, - args, &shape_it[0], strides, - (npy_bool *)wheremask_data, wheremask_strides_it[0], - cast_info.auxdata) < 0) { + result = stransfer(&cast_info.context, + args, &shape_it[0], strides, + (npy_bool *)wheremask_data, wheremask_strides_it[0], + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_THREE_NEXT(idim, ndim, coord, shape_it, @@ -272,15 +282,13 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { int fpes = npy_get_floatstatus_barrier(src_data); if (fpes && PyUFunc_GiveFloatingpointErrors("cast", fpes) < 0) { return -1; } } - return 0; - fail: NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); @@ -307,7 +315,6 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, NPY_CASTING casting) { int copied_src = 
0; - npy_intp src_strides[NPY_MAXDIMS]; /* Use array_assign_scalar if 'src' NDIM is 0 */ @@ -438,12 +445,21 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, } } + int flags = (NPY_SAME_VALUE_CASTING_FLAG & casting); + if (copycast_isaligned(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), + PyArray_DATA(dst), PyArray_STRIDES(dst)) && + copycast_isaligned(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(src), + PyArray_DATA(src), src_strides)) { + /* NPY_ALIGNED_CASTING_FLAG is internal to this file */ + flags |= NPY_ALIGNED_CASTING_FLAG; + } + if (wheremask == NULL) { /* A straightforward value assignment */ /* Do the assignment with raw array iteration */ if (raw_array_assign_array(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), - PyArray_DESCR(src), PyArray_DATA(src), src_strides) < 0) { + PyArray_DESCR(src), PyArray_DATA(src), src_strides, flags) < 0){ goto fail; } } @@ -465,7 +481,7 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), PyArray_DESCR(src), PyArray_DATA(src), src_strides, PyArray_DESCR(wheremask), PyArray_DATA(wheremask), - wheremask_strides) < 0) { + wheremask_strides, flags) < 0) { goto fail; } } diff --git a/numpy/_core/src/multiarray/array_assign_scalar.c b/numpy/_core/src/multiarray/array_assign_scalar.c index 0199ba969eb9..f7d04ed0a39f 100644 --- a/numpy/_core/src/multiarray/array_assign_scalar.c +++ b/numpy/_core/src/multiarray/array_assign_scalar.c @@ -37,7 +37,7 @@ NPY_NO_EXPORT int raw_array_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data) + PyArray_Descr *src_dtype, char *src_data, NPY_CASTING casting) { int idim; npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS]; @@ -86,13 +86,19 @@ raw_array_assign_scalar(int ndim, npy_intp const *shape, NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } + npy_intp strides[2] = {0, dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (cast_info.func(&cast_info.context, - args, &shape_it[0], strides, cast_info.auxdata) < 0) { + result = cast_info.func(&cast_info.context, + args, &shape_it[0], strides, cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, @@ -126,7 +132,7 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides) + npy_intp const *wheremask_strides, NPY_CASTING casting) { int idim; npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS]; @@ -177,8 +183,12 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, } NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) != 0) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } npy_intp strides[2] = {0, dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ @@ -186,10 +196,11 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, stransfer = (PyArray_MaskedStridedUnaryOp 
*)cast_info.func; char *args[2] = {src_data, dst_data}; - if (stransfer(&cast_info.context, + result = stransfer(&cast_info.context, args, &shape_it[0], strides, (npy_bool *)wheremask_data, wheremask_strides_it[0], - cast_info.auxdata) < 0) { + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it, @@ -298,7 +309,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, /* Do the assignment with raw array iteration */ if (raw_array_assign_scalar(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), - src_dtype, src_data) < 0) { + src_dtype, src_data, casting) < 0) { goto fail; } } @@ -319,7 +330,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), src_dtype, src_data, PyArray_DESCR(wheremask), PyArray_DATA(wheremask), - wheremask_strides) < 0) { + wheremask_strides, casting) < 0) { goto fail; } } diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index ff7d98bd9c64..2de639611bf6 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -479,20 +479,13 @@ npy_cast_raw_scalar_item( NPY_NO_EXPORT int PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) { - PyArrayObject_fields arr_fields = { - .flags = NPY_ARRAY_WRITEABLE, /* assume array is not behaved. */ - }; - Py_SET_TYPE(&arr_fields, &PyArray_Type); - Py_SET_REFCNT(&arr_fields, 1); - if (NPY_UNLIKELY(descr->type_num == NPY_OBJECT)) { /* * We always have store objects directly, casting will lose some * type information. Any other dtype discards the type information. * TODO: For a Categorical[object] this path may be necessary? */ - arr_fields.descr = descr; - return PyDataType_GetArrFuncs(descr)->setitem(value, item, &arr_fields); + return NPY_DT_CALL_setitem(descr, value, item); } /* discover_dtype_from_pyobject includes a check for is_known_scalar_type */ @@ -527,8 +520,7 @@ PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) if (DType == NPY_DTYPE(descr) || DType == (PyArray_DTypeMeta *)Py_None) { /* We can set the element directly (or at least will try to) */ Py_XDECREF(DType); - arr_fields.descr = descr; - return PyDataType_GetArrFuncs(descr)->setitem(value, item, &arr_fields); + return NPY_DT_CALL_setitem(descr, value, item); } PyArray_Descr *tmp_descr; tmp_descr = NPY_DT_CALL_discover_descr_from_pyobject(DType, value); @@ -546,8 +538,7 @@ PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) if (PyDataType_FLAGCHK(tmp_descr, NPY_NEEDS_INIT)) { memset(data, 0, tmp_descr->elsize); } - arr_fields.descr = tmp_descr; - if (PyDataType_GetArrFuncs(tmp_descr)->setitem(value, data, &arr_fields) < 0) { + if (NPY_DT_CALL_setitem(tmp_descr, value, data) < 0) { PyObject_Free(data); Py_DECREF(tmp_descr); return -1; @@ -1148,7 +1139,7 @@ PyArray_DiscoverDTypeAndShape_Recursive( force_sequence_due_to_char_dtype: /* Ensure we have a sequence (required for PyPy) */ - seq = PySequence_Fast(obj, "Could not convert object to sequence"); + seq = PySequence_Fast(obj, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed if (seq == NULL) { /* * Specifically do not fail on things that look like a dictionary, diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 496173038954..578e7b1554f4 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c 
@@ -83,7 +83,7 @@ array_converter_new( } else { item->array = (PyArrayObject *)PyArray_FromAny_int( - item->object, NULL, NULL, 0, 0, 0, NULL, + item->object, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &item->scalar_input); if (item->array == NULL) { goto fail; @@ -97,7 +97,7 @@ array_converter_new( Py_INCREF(item->DType); /* - * Check whether we were passed a an int/float/complex Python scalar. + * Check whether we were passed an int/float/complex Python scalar. * If not, set `descr` and clear pyscalar/scalar flags as needed. */ if (item->scalar_input && npy_mark_tmp_array_if_pyscalar( diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index 5554cad5e2dd..c7280435d3c3 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -4,7 +4,7 @@ * pointers to do fast operations on the given input functions. * It thus adds an abstraction layer around individual ufunc loops. * - * Unlike methods, a ArrayMethod can have multiple inputs and outputs. + * Unlike methods, an ArrayMethod can have multiple inputs and outputs. * This has some serious implication for garbage collection, and as far * as I (@seberg) understands, it is not possible to always guarantee correct * cyclic garbage collection of dynamically created DTypes with methods. @@ -30,8 +30,8 @@ #define _UMATHMODULE #define _MULTIARRAYMODULE -#include #include +#include #include "arrayobject.h" #include "array_coercion.h" #include "array_method.h" @@ -39,6 +39,7 @@ #include "convert_datatype.h" #include "common.h" #include "numpy/ufuncobject.h" +#include "dtype_transfer.h" /* @@ -184,12 +185,17 @@ validate_spec(PyArrayMethod_Spec *spec) "not exceed %d. (method: %s)", NPY_MAXARGS, spec->name); return -1; } - switch (spec->casting) { + switch ((int)spec->casting) { case NPY_NO_CASTING: case NPY_EQUIV_CASTING: case NPY_SAFE_CASTING: case NPY_SAME_KIND_CASTING: case NPY_UNSAFE_CASTING: + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: break; default: if (spec->casting != -1) { @@ -668,10 +674,11 @@ boundarraymethod__resolve_descripors( if (!parametric) { /* * Non-parametric can only mismatch if it switches from equiv to no - * (e.g. due to byteorder changes). + * (e.g. due to byteorder changes). 
Throw away same_value casting flag */ + int method_casting = self->method->casting & ~NPY_SAME_VALUE_CASTING_FLAG; if (cast != self->method->casting && - self->method->casting != NPY_EQUIV_CASTING) { + method_casting != NPY_EQUIV_CASTING) { PyErr_Format(PyExc_RuntimeError, "resolve_descriptors cast level changed even though " "the cast is non-parametric where the only possible " @@ -792,11 +799,10 @@ boundarraymethod__simple_strided_call( return NULL; } - PyArrayMethod_Context context = { - .caller = NULL, - .method = self->method, - .descriptors = descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.method = self->method; + PyArrayMethod_StridedLoop *strided_loop = NULL; NpyAuxData *loop_data = NULL; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -984,3 +990,4 @@ NPY_NO_EXPORT PyTypeObject PyBoundArrayMethod_Type = { .tp_methods = boundarraymethod_methods, .tp_getset = boundarraymethods_getters, }; + diff --git a/numpy/_core/src/multiarray/array_method.h b/numpy/_core/src/multiarray/array_method.h index bcf270899f13..303425e38274 100644 --- a/numpy/_core/src/multiarray/array_method.h +++ b/numpy/_core/src/multiarray/array_method.h @@ -69,7 +69,7 @@ typedef struct PyArrayMethodObject_tag { /* - * We will sometimes have to create a ArrayMethod and allow passing it around, + * We will sometimes have to create an ArrayMethod and allow passing it around, * similar to `instance.method` returning a bound method, e.g. a function like * `ufunc.resolve()` can return a bound object. * The current main purpose of the BoundArrayMethod is that it holds on to the diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 9834ab138cf6..72211e2a6d62 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -371,7 +371,7 @@ array__get_implementing_args( return NULL; } - relevant_args = PySequence_Fast( + relevant_args = PySequence_Fast( // noqa: borrowed-ref OK relevant_args, "dispatcher for __array_function__ did not return an iterable"); if (relevant_args == NULL) { @@ -518,7 +518,7 @@ dispatcher_vectorcall(PyArray_ArrayFunctionDispatcherObject *self, fix_name_if_typeerror(self); return NULL; } - Py_SETREF(relevant_args, PySequence_Fast(relevant_args, + Py_SETREF(relevant_args, PySequence_Fast(relevant_args, // noqa: borrowed-ref OK "dispatcher for __array_function__ did not return an iterable")); if (relevant_args == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 9e5588f98a83..d67bdd046c6d 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -3,6 +3,7 @@ #include #include #include +#include #include #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -15,6 +16,7 @@ #include "npy_pycompat.h" #include "numpy/npy_math.h" #include "numpy/halffloat.h" +#include "numpy/dtype_api.h" #include "npy_config.h" #include "npy_sort.h" @@ -44,6 +46,20 @@ #include "umathmodule.h" #include "npy_static_data.h" +/**begin repeat + * #NAME = BOOL, + * BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG, + * HALF, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * DATETIME, TIMEDELTA# + */ +static inline void +@NAME@_copyswap(void *dst, void *src, int swap, void *arr); + +/**end repeat**/ + + /* * Define a stack allocated dummy array with only the minimum information set: * 1. 
The descr, the main field interesting here. @@ -96,7 +112,7 @@ MyPyFloat_AsDouble(PyObject *obj) } num = PyNumber_Float(obj); if (num == NULL) { - return NPY_NAN; + return -1; } ret = PyFloat_AS_DOUBLE(num); Py_DECREF(num); @@ -108,6 +124,9 @@ static float MyPyFloat_AsFloat(PyObject *obj) { double d_val = MyPyFloat_AsDouble(obj); + if (error_converting(d_val)) { + return -1; + } float res = (float)d_val; if (NPY_UNLIKELY(npy_isinf(res) && !npy_isinf(d_val))) { if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) { @@ -122,10 +141,13 @@ static npy_half MyPyFloat_AsHalf(PyObject *obj) { double d_val = MyPyFloat_AsDouble(obj); + if (error_converting(d_val)) { + return -1; + } npy_half res = npy_double_to_half(d_val); if (NPY_UNLIKELY(npy_half_isinf(res) && !npy_isinf(d_val))) { if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) { - return npy_double_to_half(-1.); + return -1; // exception return as integer } } return res; @@ -137,10 +159,16 @@ MyPyFloat_FromHalf(npy_half h) return PyFloat_FromDouble(npy_half_to_double(h)); } -/* Handle case of assigning from an array scalar in setitem */ +/* + * Handle case of assigning from an array scalar in setitem. + * NOTE/TODO(seberg): This was important, but is now only used + * for *nested* 0-D arrays which makes it dubious whether it should + * remain used. + * (At the point of writing, I did not want to worry about BC though.) + */ static int -convert_to_scalar_and_retry(PyObject *op, void *ov, void *vap, - int (*setitem)(PyObject *op, void *ov, void *vap)) +convert_to_scalar_and_retry(PyArray_Descr *descr, PyObject *op, char *ov, + int (*setitem)(PyArray_Descr *descr, PyObject *op, char *ov)) { PyObject *temp; @@ -151,7 +179,7 @@ convert_to_scalar_and_retry(PyObject *op, void *ov, void *vap, return -1; } else { - int res = setitem(temp, ov, vap); + int res = setitem(descr, temp, ov); Py_DECREF(temp); return res; } @@ -324,9 +352,8 @@ static PyObject * } NPY_NO_EXPORT int -@TYPE@_setitem(PyObject *op, void *ov, void *vap) +@TYPE@_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; @type@ temp; /* ensures alignment */ #if @is_int@ @@ -364,28 +391,23 @@ NPY_NO_EXPORT int } else { temp = (@type@)@func2@(op); - } - if (PyErr_Occurred()) { - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (PySequence_NoString_Check(op)) { - PyErr_SetString(PyExc_ValueError, - "setting an array element with a sequence."); - npy_PyErr_ChainExceptionsCause(type, value, traceback); - } - else { - PyErr_Restore(type, value, traceback); + if (temp == (@type@)-1 && PyErr_Occurred()) { + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + if (PySequence_NoString_Check(op)) { + PyErr_SetString(PyExc_ValueError, + "setting an array element with a sequence."); + npy_PyErr_ChainExceptionsCause(type, value, traceback); + } + else { + PyErr_Restore(type, value, traceback); + } + return -1; } - return -1; - } - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - assert(npy_is_aligned(ov, NPY_ALIGNOF(@type@))); - *((@type@ *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); } + // Support descr == NULL for some scalarmath paths. 
+ @TYPE@_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -431,19 +453,17 @@ static PyObject * * #suffix = f, , l# */ NPY_NO_EXPORT int -@NAME@_setitem(PyObject *op, void *ov, void *vap) +@NAME@_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; Py_complex oop; @type@ temp; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, @NAME@_setitem); - } - if (PyArray_IsScalar(op, @kind@)){ temp = PyArrayScalar_VAL(op, @kind@); } + else if (PyArray_IsZeroDim(op)) { + return convert_to_scalar_and_retry(descr, op, ov, @NAME@_setitem); + } else { if (op == Py_None) { oop.real = NPY_NAN; @@ -502,10 +522,8 @@ NPY_NO_EXPORT int #endif } - memcpy(ov, &temp, NPY_SIZEOF_@NAME@); - if (ap != NULL && PyArray_ISBYTESWAPPED(ap)) { - byte_swap_vector(ov, 2, sizeof(@ftype@)); - } + @NAME@_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -541,8 +559,8 @@ string_to_long_double(PyObject*op) errno = 0; temp = NumPyOS_ascii_strtold(s, &end); if (errno == ERANGE) { - if (PyErr_Warn(PyExc_RuntimeWarning, - "overflow encountered in conversion from string") < 0) { + if (PyErr_WarnEx(PyExc_RuntimeWarning, + "overflow encountered in conversion from string", 1) < 0) { Py_XDECREF(b); return 0; } @@ -587,19 +605,17 @@ LONGDOUBLE_getitem(void *ip, void *ap) } NPY_NO_EXPORT int -LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap) +LONGDOUBLE_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_longdouble temp; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, LONGDOUBLE_setitem); - } - if (PyArray_IsScalar(op, LongDouble)) { temp = PyArrayScalar_VAL(op, LongDouble); } + else if (PyArray_IsZeroDim(op)) { + return convert_to_scalar_and_retry(descr, op, ov, LONGDOUBLE_setitem); + } else { /* In case something funny happened in PyArray_IsScalar */ if (PyErr_Occurred()) { @@ -610,13 +626,9 @@ LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap) if (PyErr_Occurred()) { return -1; } - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_longdouble *)ov) = temp; - } - else { - copy_and_swap(ov, &temp, PyArray_ITEMSIZE(ap), 1, 0, - PyArray_ISBYTESWAPPED(ap)); - } + // Support descr == NULL for scalarmath paths + LONGDOUBLE_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -662,12 +674,10 @@ UNICODE_getitem(void *ip, void *vap) } static int -UNICODE_setitem(PyObject *op, void *ov, void *vap) +UNICODE_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, UNICODE_setitem); + return convert_to_scalar_and_retry(descr, op, ov, UNICODE_setitem); } if (PySequence_NoString_Check(op)) { @@ -689,7 +699,7 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) } /* truncate if needed */ - Py_ssize_t max_len = PyArray_ITEMSIZE(ap) >> 2; + Py_ssize_t max_len = descr->elsize >> 2; Py_ssize_t actual_len = PyUnicode_GetLength(temp); if (actual_len < 0) { Py_DECREF(temp); @@ -706,7 +716,8 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) Py_ssize_t num_bytes = actual_len * 4; char *buffer; - if (!PyArray_ISALIGNED(ap)) { + int aligned = npy_is_aligned(ov, NPY_ALIGNOF(Py_UCS4)); + if (!aligned) { buffer = PyArray_malloc(num_bytes); if (buffer == NULL) { Py_DECREF(temp); @@ -723,16 +734,16 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) return -1; } - if 
(!PyArray_ISALIGNED(ap)) { + if (!aligned) { memcpy(ov, buffer, num_bytes); PyArray_free(buffer); } /* Fill in the rest of the space with 0 */ - if (PyArray_ITEMSIZE(ap) > num_bytes) { - memset((char*)ov + num_bytes, 0, (PyArray_ITEMSIZE(ap) - num_bytes)); + if (descr->elsize > num_bytes) { + memset((char*)ov + num_bytes, 0, (descr->elsize - num_bytes)); } - if (PyArray_ISBYTESWAPPED(ap)) { + if (PyDataType_ISBYTESWAPPED(descr)) { byte_swap_vector(ov, actual_len, 4); } Py_DECREF(temp); @@ -760,15 +771,14 @@ STRING_getitem(void *ip, void *vap) } static int -STRING_setitem(PyObject *op, void *ov, void *vap) +STRING_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; char *ptr; Py_ssize_t len; PyObject *temp = NULL; if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, STRING_setitem); + return convert_to_scalar_and_retry(descr, op, ov, STRING_setitem); } if (PySequence_NoString_Check(op)) { @@ -806,13 +816,13 @@ STRING_setitem(PyObject *op, void *ov, void *vap) Py_DECREF(temp); return -1; } - memcpy(ov, ptr, PyArray_MIN(PyArray_ITEMSIZE(ap),len)); + memcpy(ov, ptr, PyArray_MIN(descr->elsize, len)); /* * If string length is smaller than room in array * Then fill the rest of the element size with NULL */ - if (PyArray_ITEMSIZE(ap) > len) { - memset((char *)ov + len, 0, (PyArray_ITEMSIZE(ap) - len)); + if (descr->elsize > len) { + memset((char *)ov + len, 0, (descr->elsize - len)); } Py_DECREF(temp); return 0; @@ -839,7 +849,7 @@ OBJECT_getitem(void *ip, void *NPY_UNUSED(ap)) static int -OBJECT_setitem(PyObject *op, void *ov, void *NPY_UNUSED(ap)) +OBJECT_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { PyObject *obj; @@ -865,11 +875,9 @@ VOID_getitem(void *input, void *vap) _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(vap); if (PyDataType_HASFIELDS(descr)) { - PyObject *key; PyObject *names; int i, n; PyObject *ret; - PyObject *tup; PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; @@ -880,9 +888,7 @@ VOID_getitem(void *input, void *vap) for (i = 0; i < n; i++) { npy_intp offset; PyArray_Descr *new; - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { Py_DECREF(ret); return NULL; } @@ -967,14 +973,10 @@ NPY_NO_EXPORT int _setup_field(int i, _PyArray_LegacyDescr *descr, PyArrayObject *arr, npy_intp *offset_p, char *dstdata) { - PyObject *key; - PyObject *tup; PyArray_Descr *new; npy_intp offset; - key = PyTuple_GET_ITEM(descr->names, i); - tup = PyDict_GetItem(descr->fields, key); - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { return -1; } @@ -1027,18 +1029,17 @@ _copy_and_return_void_setitem(_PyArray_LegacyDescr *dstdescr, char *dstdata, } static int -VOID_setitem(PyObject *op, void *input, void *vap) +VOID_setitem(PyArray_Descr *descr_, PyObject *op, char *ip) { - char *ip = input; - PyArrayObject *ap = vap; - int itemsize = PyArray_ITEMSIZE(ap); + _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)descr_; + int itemsize = descr->elsize; int res; - _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); if (PyDataType_HASFIELDS(descr)) { PyObject *errmsg; npy_int i; npy_intp offset; + PyArray_Descr *field_descr; int failed = 0; /* If op is 0d-ndarray or numpy scalar, directly get dtype & data ptr */ @@ -1071,23 +1072,18 @@ 
VOID_setitem(PyObject *op, void *input, void *vap) return -1; } - PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); - PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - for (i = 0; i < names_size; i++) { - PyObject *item; - - if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { + PyObject *item = PyTuple_GetItem(op, i); + if (item == NULL) { failed = 1; break; } - item = PyTuple_GetItem(op, i); - if (item == NULL) { + if (_unpack_field_index(descr, i, &field_descr, &offset) < 0) { failed = 1; break; } /* use setitem to set this field */ - if (PyArray_SETITEM(dummy_arr, ip + offset, item) < 0) { + if (NPY_DT_CALL_setitem(field_descr, item, ip + offset) < 0) { failed = 1; break; } @@ -1097,17 +1093,13 @@ VOID_setitem(PyObject *op, void *input, void *vap) /* Otherwise must be non-void scalar. Try to assign to each field */ npy_intp names_size = PyTuple_GET_SIZE(descr->names); - PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); - PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - for (i = 0; i < names_size; i++) { - /* temporarily make ap have only this field */ - if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { + if (_unpack_field_index(descr, i, &field_descr, &offset) < 0) { failed = 1; break; } /* use setitem to set this field */ - if (PyArray_SETITEM(dummy_arr, ip + offset, op) < 0) { + if (NPY_DT_CALL_setitem(field_descr, op, ip + offset) < 0) { failed = 1; break; } @@ -1137,7 +1129,7 @@ VOID_setitem(PyObject *op, void *input, void *vap) PyArrayObject *ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, descr->subarray->base, shape.len, shape.ptr, NULL, ip, - PyArray_FLAGS(ap), NULL, NULL); + NPY_ARRAY_WRITEABLE, NULL, NULL); npy_free_cache_dim_obj(shape); if (!ret) { return -1; @@ -1215,15 +1207,14 @@ TIMEDELTA_getitem(void *ip, void *vap) } static int -DATETIME_setitem(PyObject *op, void *ov, void *vap) +DATETIME_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_datetime temp = 0; PyArray_DatetimeMetaData *meta = NULL; /* Get the datetime units metadata */ - meta = get_datetime_metadata_from_dtype(PyArray_DESCR(ap)); + meta = get_datetime_metadata_from_dtype(descr); if (meta == NULL) { return -1; } @@ -1235,27 +1226,20 @@ DATETIME_setitem(PyObject *op, void *ov, void *vap) } /* Copy the value into the output */ - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_datetime *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); - } - + DATETIME_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } static int -TIMEDELTA_setitem(PyObject *op, void *ov, void *vap) +TIMEDELTA_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_timedelta temp = 0; PyArray_DatetimeMetaData *meta = NULL; /* Get the datetime units metadata */ - meta = get_datetime_metadata_from_dtype(PyArray_DESCR(ap)); + meta = get_datetime_metadata_from_dtype(descr); if (meta == NULL) { return -1; } @@ -1266,19 +1250,39 @@ TIMEDELTA_setitem(PyObject *op, void *ov, void *vap) return -1; } - /* Copy the value into the output */ - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_timedelta *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); - } - + TIMEDELTA_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } +/**begin 
repeat
 *
 * #NAME = BOOL,
 *         BYTE, UBYTE, SHORT, USHORT, INT, UINT,
 *         LONG, ULONG, LONGLONG, ULONGLONG,
 *         HALF, FLOAT, DOUBLE, LONGDOUBLE,
 *         CFLOAT, CDOUBLE, CLONGDOUBLE,
 *         OBJECT, STRING, UNICODE, VOID,
 *         DATETIME, TIMEDELTA#
 */

/*
 * Legacy fallback setitem. It should be deprecated, but anyone who calls
 * our setitem *without* an array (or who steals it for their own dtype)
 * may still need it.  A future NumPy 3 should probably just drop all of
 * this, though.
 */
static int
@NAME@_legacy_setitem(PyObject *value, void *data, void *vap)
{
    // Traditionally, most builtins allow descr to be NULL, so assume that is OK
    PyArray_Descr *descr = vap == NULL ? NULL : PyArray_DESCR((PyArrayObject *)vap);
    return @NAME@_setitem(descr, value, data);
}

/**end repeat**/


 /*
 *****************************************************************************
 **                       TYPE TO TYPE CONVERSIONS                          **
 *****************************************************************************
 */
@@ -1351,7 +1355,7 @@ static void
     while (n--) {
         @type1@ t = (@type1@)*ip++;
-        *op++ = t;
+        *op++ = (@type2@)t;
 #if @steps@ == 2  /* complex type */
         *op++ = 0;
 #endif
@@ -1534,7 +1538,7 @@ static void
         if (temp == NULL) {
             return;
         }
-        if (@to@_setitem(temp, op, aop)) {
+        if (@to@_setitem(PyArray_DESCR(aop), temp, (char *)op)) {
             Py_DECREF(temp);
             return;
         }
@@ -1582,7 +1586,7 @@ static void
             Py_INCREF(Py_False);
             temp = Py_False;
         }
-        if (@to@_setitem(temp, op, aop)) {
+        if (@to@_setitem(PyArray_DESCR(aop), temp, (char *)op)) {
             Py_DECREF(temp);
             return;
         }
@@ -1947,7 +1951,7 @@ _basic_copy(void *dst, void *src, int elsize) {
 * npy_half, npy_float, npy_double, npy_longdouble,
 * npy_datetime, npy_timedelta#
 */
-static void
+static inline void
 @fname@_copyswapn (void *dst, npy_intp dstride, void *src, npy_intp sstride,
                    npy_intp n, int swap, void *NPY_UNUSED(arr))
 {
@@ -1958,7 +1962,7 @@ static void
     }
 }

-static void
+static inline void
 @fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr))
 {
     /* copy first if needed */
@@ -2040,7 +2044,7 @@ static void
     /* ignore swap */
 }

-static void
+static inline void
 @fname@_copyswap (void *dst, void *src, int NPY_UNUSED(swap),
                   void *NPY_UNUSED(arr))
 {
@@ -2073,7 +2077,7 @@ static void
     }
 }

-static void
+static inline void
 @fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr))
 {
     /* copy first if needed */
@@ -2218,7 +2222,7 @@ OBJECT_copyswapn(PyObject **dst, npy_intp dstride, PyObject **src,
     return;
 }

-static void
+static inline void
 OBJECT_copyswap(PyObject **dst, PyObject **src, int NPY_UNUSED(swap),
                 void *NPY_UNUSED(arr))
 {
@@ -2274,7 +2278,7 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
     PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr);
     PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields;

-    while (PyDict_Next(descr->fields, &pos, &key, &value)) {
+    while (PyDict_Next(descr->fields, &pos, &key, &value)) {  // noqa: borrowed-ref OK
         npy_intp offset;
         PyArray_Descr *new;
         if (NPY_TITLE_KEY(key, value)) {
@@ -2359,7 +2363,7 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
     PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr);
     PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields;

-    while (PyDict_Next(descr->fields, &pos, &key, &value)) {
+    while (PyDict_Next(descr->fields, &pos, &key, &value)) {  // noqa: borrowed-ref OK
         npy_intp offset;
         PyArray_Descr * new;

@@ -2449,7 +2453,7 @@ UNICODE_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
 }

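/*
 * A sketch of what the legacy bridge above amounts to for one concrete type
 * (names are from this file; the calls are illustrative, under the assumption
 * of an aligned, behaved array `arr`):
 *
 *     // old-style, array-based caller:
 *     PyDataType_GetArrFuncs(PyArray_DESCR(arr))->setitem(value, ptr, arr);
 *     // is now routed through DOUBLE_legacy_setitem, which derives the
 *     // descr from the array and calls the new descr-based implementation:
 *     DOUBLE_setitem(PyArray_DESCR(arr), value, ptr);
 */
-static void
+static inline void
 STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), PyArrayObject *arr)
 {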
assert(arr != NULL); @@ -2460,7 +2464,7 @@ STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), PyArrayObject *arr) _basic_copy(dst, src, PyArray_ITEMSIZE(arr)); } -static void +static inline void UNICODE_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) { int itemsize; @@ -2679,7 +2683,7 @@ VOID_nonzero (char *ip, PyArrayObject *ap) PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK PyArray_Descr * new; npy_intp offset; if (NPY_TITLE_KEY(key, value)) { @@ -3016,9 +3020,8 @@ UNICODE_compare(npy_ucs4 *ip1, npy_ucs4 *ip2, static int VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) { - PyArray_Descr *descr; - PyObject *names, *key; - PyObject *tup; + _PyArray_LegacyDescr *descr; + PyObject *names; PyArrayObject_fields dummy_struct; PyArrayObject *dummy = (PyArrayObject *)&dummy_struct; char *nip1, *nip2; @@ -3031,18 +3034,16 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) if (mem_handler == NULL) { goto finish; } - descr = PyArray_DESCR(ap); + descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); /* * Compare on the first-field. If equal, then * compare on the second-field, etc. */ - names = PyDataType_NAMES(descr); + names = descr->names; for (i = 0; i < PyTuple_GET_SIZE(names); i++) { PyArray_Descr *new; npy_intp offset; - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { goto finish; } /* Set the fields needed by compare or copyswap */ @@ -4027,7 +4028,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { @from@_to_VOID }, @from@_getitem, - @from@_setitem, + @from@_legacy_setitem, (PyArray_CopySwapNFunc*)@from@_copyswapn, (PyArray_CopySwapFunc*)@from@_copyswap, (PyArray_CompareFunc*)@from@_compare, @@ -4146,7 +4147,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { @from@_to_VOID }, @from@_getitem, - @from@_setitem, + @from@_legacy_setitem, (PyArray_CopySwapNFunc*)@from@_copyswapn, (PyArray_CopySwapFunc*)@from@_copyswap, (PyArray_CompareFunc*)@from@_compare, @@ -4318,10 +4319,202 @@ PyArray_DescrFromType(int type) /* ***************************************************************************** - ** SETUP TYPE INFO ** + ** NEWSTYLE TYPE METHODS ** ***************************************************************************** */ +static int +BOOL_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + switch (constant_id) { + case NPY_CONSTANT_zero: + case NPY_CONSTANT_minimum_finite: + *(npy_bool *)ptr = NPY_FALSE; + return 1; + case NPY_CONSTANT_one: + case NPY_CONSTANT_maximum_finite: + *(npy_bool *)ptr = NPY_TRUE; + return 1; + default: + return 0; + } +} +/**begin repeat + * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG# + * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong# + * #IS_UNSIGNED = 0, 1, 0, 1, 0, 1, + * 0, 1, 0, 1# + * #MIN = NPY_MIN_BYTE, 0, NPY_MIN_SHORT, 0, NPY_MIN_INT, 0, + * NPY_MIN_LONG, 0, NPY_MIN_LONGLONG, 0# + */ +static int +@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + @type@ val; + switch (constant_id) { + case NPY_CONSTANT_zero: + val = 0; + break; + case NPY_CONSTANT_one: + val = 1; + break; + case 
NPY_CONSTANT_minimum_finite:
+#if @IS_UNSIGNED@
+            val = 0;
+#else
+            val = @MIN@;
+#endif
+            break;
+        case NPY_CONSTANT_maximum_finite:
+            val = NPY_MAX_@NAME@;
+            break;
+        default:
+            return 0;
+    }
+    @NAME@_copyswap(ptr, &val, !PyArray_ISNBO(descr->byteorder), NULL);
+    return 1;
+}
+/**end repeat**/
+
+/*
+ * Keep the HALF_* macros consistent with the standard C <float.h> limits.
+ * Reference: https://en.cppreference.com/w/c/types/limits.html
+ */
+#define HALF_MAX 31743        /* Bit pattern for 65504.0 */
+#define HALF_MIN 1024         /* Bit pattern for smallest positive normal: 2^-14 */
+#define HALF_NEG_MAX 64511    /* Bit pattern for -65504.0 */
+#define HALF_EPSILON 5120
+#define HALF_TRUE_MIN 0x0001  /* Bit pattern for smallest positive subnormal: 2^-24 */
+#define HALF_MAX_EXP 16
+#define HALF_MIN_EXP -13
+#define HALF_MANT_DIG 11      /* 10 + 1 (implicit) */
+#define HALF_DIG 3
+
+/*
+ * On PPC64 systems the IBM double-double format is a pair of IEEE binary64
+ * values (not a true IEEE quad). We derive epsilon from the interval
+ * definition of machine epsilon: the difference between 1.0 and the next
+ * representable floating-point number larger than 1.0. The ~106 bits of
+ * mantissa precision (53 + 53) give an epsilon of 2^-105, whereas glibc
+ * returns 2^-1074 (DBL_TRUE_MIN).
+ */
+#if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
+    defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE)
+    #undef LDBL_EPSILON
+    #define LDBL_EPSILON 0x1p-105L  /* 2^-105 */
+#endif
+
+/*
+ * Define *_TRUE_MIN macros for smallest subnormal values if not available.
+ * Use nextafter(0, 1) to get the smallest positive representable value.
+ */
+#ifndef FLT_TRUE_MIN
+    #define FLT_TRUE_MIN npy_nextafterf(0.0f, 1.0f)
+#endif
+#ifndef DBL_TRUE_MIN
+    #define DBL_TRUE_MIN npy_nextafter(0.0, 1.0)
+#endif
+#ifndef LDBL_TRUE_MIN
+    #define LDBL_TRUE_MIN npy_nextafterl(0.0L, 1.0L)
+#endif
+
+/**begin repeat
+ * #NAME = HALF, FLOAT, DOUBLE, LONGDOUBLE#
+ * #ABB = HALF, FLT, DBL, LDBL#
+ * #type = npy_half, npy_float, npy_double, npy_longdouble#
+ * #RADIX = 16384, 2, 2, 2#
+ * #NEG_MAX = HALF_NEG_MAX, -FLT_MAX, -DBL_MAX, -LDBL_MAX#
+ */
+static int
+@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr)
+{
+    @type@ val;
+    switch (constant_id) {
+        case NPY_CONSTANT_zero:
+            val = 0;
+            break;
+        case NPY_CONSTANT_one:
+            val = 1;
+            break;
+        case NPY_CONSTANT_minimum_finite:
+            val = @NEG_MAX@;
+            break;
+        case NPY_CONSTANT_maximum_finite:
+        #if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
+            defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE)
+            /* For IBM double-double, use nextafter(inf, 0) to get the true
+             * maximum representable value (matches old MachArLike behavior) */
+            if (sizeof(@type@) == sizeof(npy_longdouble)) {
+                val = npy_nextafterl((@type@)NPY_INFINITY, (@type@)0.0L);
+                break;
+            }
+        #endif
+            val = @ABB@_MAX;
+            break;
+        case NPY_CONSTANT_inf:
+            val = (@type@)NPY_INFINITYF;
+            break;
+        case NPY_CONSTANT_nan:
+            val = (@type@)NPY_NANF;
+            break;
+        case NPY_CONSTANT_finfo_radix:
+            val = @RADIX@;
+            break;
+        case NPY_CONSTANT_finfo_eps:
+            val = @ABB@_EPSILON;
+            break;
+        case NPY_CONSTANT_finfo_smallest_normal:
+            val = @ABB@_MIN;
+            break;
+        case NPY_CONSTANT_finfo_smallest_subnormal:
+            val = @ABB@_TRUE_MIN;
+            break;
+        case NPY_CONSTANT_finfo_nmant:
+            *(npy_intp *)ptr = @ABB@_MANT_DIG - 1;
+            return 1;
+        case NPY_CONSTANT_finfo_min_exp:
+            /*
+             * Definition: the minimum negative integer such that FLT_RADIX
+             * raised to one less than that power is a normalized float,
+             * double, or long double, respectively.
+             *
+             * Reference: https://en.cppreference.com/w/c/types/limits.html
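+             *
+             * For example, DBL_MIN_EXP is -1021 for IEEE binary64, so the
+             * value reported here is -1022, matching
+             * np.finfo(np.float64).minexp.
+             */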
+            *(npy_intp *)ptr = @ABB@_MIN_EXP - 1;
+            return 1;
+        case NPY_CONSTANT_finfo_max_exp:
+            *(npy_intp *)ptr = @ABB@_MAX_EXP;
+            return 1;
+        case NPY_CONSTANT_finfo_decimal_digits:
+            *(npy_intp *)ptr = @ABB@_DIG;
+            return 1;
+        default:
+            return 0;
+    }
+    @NAME@_copyswap(ptr, &val, !PyArray_ISNBO(descr->byteorder), NULL);
+    return 1;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ * #NAME = CFLOAT, CDOUBLE, CLONGDOUBLE,
+ *         OBJECT, STRING, UNICODE, VOID,
+ *         DATETIME, TIMEDELTA#
+ */
+static int
+@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr)
+{
+    // TODO: Currently unused, but these could quickly become useful as
+    // reduction identity/initial values, so they should be implemented.
+    return 0;
+}
+/**end repeat**/
+
+
+/*
+ *****************************************************************************
+ **                          SETUP TYPE INFO                                **
+ *****************************************************************************
+ */

 /*
  * This function is called during numpy module initialization,
@@ -4331,6 +4524,7 @@ NPY_NO_EXPORT int
 set_typeinfo(PyObject *dict)
 {
     PyObject *infodict = NULL;
+    PyArray_DTypeMeta *dtypemeta;  // borrowed
     int i;

     _PyArray_LegacyDescr *dtype;
@@ -4385,7 +4579,7 @@ set_typeinfo(PyObject *dict)
     *  PyArray_ComplexAbstractDType*3,
     *  PyArrayDescr_Type*6 #
     */
-    if (dtypemeta_wrap_legacy_descriptor(
+    dtypemeta = dtypemeta_wrap_legacy_descriptor(
             _builtin_descrs[NPY_@NAME@],
             &_Py@Name@_ArrFuncs,
             (PyTypeObject *)&@scls@,
@@ -4395,9 +4589,12 @@ set_typeinfo(PyObject *dict)
 #else
             NULL
 #endif
-            ) < 0) {
+            );
+    if (dtypemeta == NULL) {
         return -1;
     }
+    NPY_DT_SLOTS(dtypemeta)->setitem = @NAME@_setitem;
+    NPY_DT_SLOTS(dtypemeta)->get_constant = @NAME@_get_constant;

 /**end repeat**/

diff --git a/numpy/_core/src/multiarray/arraytypes.h.src b/numpy/_core/src/multiarray/arraytypes.h.src
index a5613aa8dad6..59dc836a2de5 100644
--- a/numpy/_core/src/multiarray/arraytypes.h.src
+++ b/numpy/_core/src/multiarray/arraytypes.h.src
@@ -1,6 +1,10 @@
 #ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_
 #define NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_

+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #include "common.h"

 NPY_NO_EXPORT int
@@ -40,7 +44,7 @@ small_correlate(const char * d_, npy_intp dstride,
  */

 NPY_NO_EXPORT int
-@TYPE@_setitem(PyObject *obj, void *data_ptr, void *arr);
+@TYPE@_setitem(PyArray_Descr *descr, PyObject *obj, char *data_ptr);

 /**end repeat**/

@@ -165,4 +169,8 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT int BOOL_argmax,
 NPY_NO_EXPORT npy_intp
 count_nonzero_trivial_dispatcher(npy_intp count, const char* data, npy_intp stride, int dtype_num);

+#ifdef __cplusplus
+}
+#endif
+
 #endif  /* NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ */
diff --git a/numpy/_core/src/multiarray/buffer.c b/numpy/_core/src/multiarray/buffer.c
index fcff3ad6ca74..a6c683f26a8b 100644
--- a/numpy/_core/src/multiarray/buffer.c
+++ b/numpy/_core/src/multiarray/buffer.c
@@ -268,7 +268,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
         int ret;

         name = PyTuple_GET_ITEM(ldescr->names, k);
-        item = PyDict_GetItem(ldescr->fields, name);
+        item = PyDict_GetItem(ldescr->fields, name);  // noqa: borrowed-ref OK
         child = (PyArray_Descr*)PyTuple_GetItem(item, 0);
         offset_obj = PyTuple_GetItem(item, 1);
diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c
index 87f03a94fa5f..b95b37987f8e 100644
--- a/numpy/_core/src/multiarray/calculation.c
+++ b/numpy/_core/src/multiarray/calculation.c
@@ -576,7 +576,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out)
         Py_INCREF(arr);
     }
     else {
-        arr = PyArray_Copy(a);
+        arr = PyArray_NewCopy(a, NPY_KEEPORDER);
         if (arr == NULL) {
             return NULL;
         }
@@ -637,8 +637,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out)
             return (PyObject *)out;
         }
         else {
-            Py_INCREF(a);
-            return (PyObject *)a;
+            return PyArray_NewCopy(a, NPY_KEEPORDER);
         }
     }
     if (decimals == 0) {
@@ -653,7 +652,15 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out)
     else {
         op1 = n_ops.true_divide;
        op2 = n_ops.multiply;
-        decimals = -decimals;
+        if (decimals == INT_MIN) {
+            // Not technically correct, but no one in this millennium is
+            // using floating point numbers with enough accuracy for the
+            // difference to matter.
+            decimals = INT_MAX;
+        }
+        else {
+            decimals = -decimals;
+        }
     }
     if (!out) {
         if (PyArray_ISINTEGER(a)) {
diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c
index 8236ec5c65ae..2e9bcbf29e8f 100644
--- a/numpy/_core/src/multiarray/common.c
+++ b/numpy/_core/src/multiarray/common.c
@@ -25,15 +25,6 @@
  * variable is misnamed, but it's part of the public API so I'm not sure we
  * can just change it.  Maybe someone should try and see if anyone notices.
  */
-/*
- * In numpy 1.6 and earlier, this was NPY_UNSAFE_CASTING. In a future
- * release, it will become NPY_SAME_KIND_CASTING. Right now, during the
- * transitional period, we continue to follow the NPY_UNSAFE_CASTING rules (to
- * avoid breaking people's code), but we also check for whether the cast would
- * be allowed under the NPY_SAME_KIND_CASTING rules, and if not we issue a
- * warning (that people's code will be broken in a future release.)
- */
-
 NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING = NPY_SAME_KIND_CASTING;

@@ -337,6 +328,30 @@ _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset)
     return 0;
 }

+
+/**
+ * Unpack a field from a structured dtype. The field index must be valid.
+ *
+ * @param descr The dtype to unpack.
+ * @param index The index of the field to unpack.
+ * @param odescr will be set to the field's dtype
+ * @param offset will be set to the field's offset
+ *
+ * @return -1 on failure, 0 on success.
+ */
+NPY_NO_EXPORT int
+_unpack_field_index(
+        _PyArray_LegacyDescr *descr,
+        npy_intp index,
+        PyArray_Descr **odescr,
+        npy_intp *offset)
+{
+    PyObject *key = PyTuple_GET_ITEM(descr->names, index);
+    PyObject *tup = PyDict_GetItem(descr->fields, key);  // noqa: borrowed-ref OK
+    return _unpack_field(tup, odescr, offset);
+}
+
+
 /*
  * check whether arrays with datatype dtype might have object fields. This will
  * only happen for structured dtypes (which may have hidden objects even if the
@@ -458,24 +473,6 @@ check_is_convertible_to_scalar(PyArrayObject *v)
         return 0;
     }

-    /* Remove this if-else block when the deprecation expires */
-    if (PyArray_SIZE(v) == 1) {
-        /* Numpy 1.25.0, 2023-01-02 */
-        if (DEPRECATE(
-                "Conversion of an array with ndim > 0 to a scalar "
-                "is deprecated, and will error in future. "
-                "Ensure you extract a single element from your array "
-                "before performing this operation. 
" - "(Deprecated NumPy 1.25.)") < 0) { - return -1; - } - return 0; - } else { - PyErr_SetString(PyExc_TypeError, - "only length-1 arrays can be converted to Python scalars"); - return -1; - } - PyErr_SetString(PyExc_TypeError, "only 0-dimensional arrays can be converted to Python scalars"); return -1; diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index e356b8251931..db7bc64733db 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -1,6 +1,8 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ #define NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ +#include + #include #include "numpy/npy_common.h" #include "numpy/ndarraytypes.h" @@ -11,6 +13,7 @@ #include "npy_static_data.h" #include "npy_import.h" #include +#include #ifdef __cplusplus extern "C" { @@ -62,12 +65,23 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending); NPY_NO_EXPORT void dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j); + /** * unpack tuple of PyDataType_FIELDS(dtype) (descr, offset, title[not-needed]) */ NPY_NO_EXPORT int _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset); +/** + * Unpack a field from a structured dtype by index. + */ +NPY_NO_EXPORT int +_unpack_field_index( + _PyArray_LegacyDescr *descr, + npy_intp index, + PyArray_Descr **odescr, + npy_intp *offset); + /* * check whether arrays with datatype dtype might have object fields. This will * only happen for structured dtypes (which may have hidden objects even if the @@ -230,15 +244,6 @@ npy_uint_alignment(int itemsize) * compared to memchr it returns one stride past end instead of NULL if needle * is not found. */ -#ifdef __clang__ - /* - * The code below currently makes use of !NPY_ALIGNMENT_REQUIRED, which - * should be OK but causes the clang sanitizer to warn. It may make - * sense to modify the code to avoid this "unaligned" access but - * it would be good to carefully check the performance changes. - */ - __attribute__((no_sanitize("alignment"))) -#endif static inline char * npy_memchr(char * haystack, char needle, npy_intp stride, npy_intp size, npy_intp * psubloopsize, int invert) @@ -259,11 +264,12 @@ npy_memchr(char * haystack, char needle, } else { /* usually find elements to skip path */ - if (!NPY_ALIGNMENT_REQUIRED && needle == 0 && stride == 1) { + if (needle == 0 && stride == 1) { /* iterate until last multiple of 4 */ char * block_end = haystack + size - (size % sizeof(unsigned int)); while (p < block_end) { - unsigned int v = *(unsigned int*)p; + unsigned int v; + memcpy(&v, p, sizeof(v)); if (v != 0) { break; } diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 26b898fa1479..e6a45554555f 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -450,7 +450,7 @@ _linear_search(const npy_double key, const npy_double *arr, const npy_intp len, /** @brief find index of a sorted array such that arr[i] <= key < arr[i + 1]. * - * If an starting index guess is in-range, the array values around this + * If a starting index guess is in-range, the array values around this * index are first checked. This allows for repeated calls for well-ordered * keys (a very common case) to use the previous index as a very good guess. 
* @@ -920,11 +920,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t return NULL; } -static const char *EMPTY_SEQUENCE_ERR_MSG = "indices must be integral: the provided " \ +static const char EMPTY_SEQUENCE_ERR_MSG[] = "indices must be integral: the provided " \ "empty sequence was inferred as float. Wrap it with " \ "'np.array(indices, dtype=np.intp)'"; -static const char *NON_INTEGRAL_ERROR_MSG = "only int indices permitted"; +static const char NON_INTEGRAL_ERROR_MSG[] = "only int indices permitted"; /* Convert obj to an ndarray with integer dtype or fail */ static PyArrayObject * @@ -1465,7 +1465,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyObject *obj; PyObject *str; const char *docstr; - static char *msg = "already has a different docstring"; + static const char msg[] = "already has a different docstring"; /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 @@ -1522,7 +1522,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyTypeObject *new = (PyTypeObject *)obj; _ADDDOC(new->tp_doc, new->tp_name); if (new->tp_dict != NULL && PyDict_CheckExact(new->tp_dict) && - PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { + PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { // noqa: borrowed-ref - manual fix needed /* Warning: Modifying `tp_dict` is not generally safe! */ if (PyDict_SetItemString(new->tp_dict, "__doc__", str) < 0) { return NULL; @@ -1620,19 +1620,15 @@ pack_inner(const char *inptr, bb[1] = npyv_tobits_b8(npyv_cmpneq_u8(v1, v_zero)); bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); - if(out_stride == 1 && - (!NPY_ALIGNMENT_REQUIRED || isAligned)) { - npy_uint64 *ptr64 = (npy_uint64*)outptr; + if(out_stride == 1 && isAligned) { #if NPY_SIMD_WIDTH == 16 - npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); - ptr64[0] = bcomp; + npy_uint64 arr[1] = {bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48)}; #elif NPY_SIMD_WIDTH == 32 - ptr64[0] = bb[0] | (bb[1] << 32); - ptr64[1] = bb[2] | (bb[3] << 32); + npy_uint64 arr[2] = {bb[0] | (bb[1] << 32), bb[2] | (bb[3] << 32)}; #else - ptr64[0] = bb[0]; ptr64[1] = bb[1]; - ptr64[2] = bb[2]; ptr64[3] = bb[3]; + npy_uint64 arr[4] = {bb[0], bb[1], bb[2], bb[3]}; #endif + memcpy(outptr, arr, sizeof(arr)); outptr += vstepx4; } else { for(int i = 0; i < 4; i++) { diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 5ada3e6e4faf..164aa2e4c8b4 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -130,7 +130,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) * dimension_from_scalar as soon as possible. */ if (!PyLong_CheckExact(obj) && PySequence_Check(obj)) { - seq_obj = PySequence_Fast(obj, + seq_obj = PySequence_Fast(obj, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer."); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. 
*/ @@ -438,15 +438,11 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags) NPY_NO_EXPORT int PyArray_BoolConverter(PyObject *object, npy_bool *val) { - if (PyObject_IsTrue(object)) { - *val = NPY_TRUE; - } - else { - *val = NPY_FALSE; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } @@ -460,15 +456,11 @@ PyArray_OptionalBoolConverter(PyObject *object, int *val) if (object == Py_None) { return NPY_SUCCEED; } - if (PyObject_IsTrue(object)) { - *val = 1; - } - else { - *val = 0; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } @@ -919,7 +911,7 @@ PyArray_CorrelatemodeConverter(PyObject *object, NPY_CORRELATEMODE *val) } } -static int casting_parser(char const *str, Py_ssize_t length, void *data) +static int casting_parser_full(char const *str, Py_ssize_t length, void *data, int can_use_same_value) { NPY_CASTING *casting = (NPY_CASTING *)data; if (length < 2) { @@ -949,6 +941,10 @@ static int casting_parser(char const *str, Py_ssize_t length, void *data) *casting = NPY_SAME_KIND_CASTING; return 0; } + if (can_use_same_value && length == 10 && strcmp(str, "same_value") == 0) { + *casting = NPY_SAME_VALUE_CASTING; + return 0; + } break; case 's': if (length == 6 && strcmp(str, "unsafe") == 0) { @@ -960,6 +956,11 @@ static int casting_parser(char const *str, Py_ssize_t length, void *data) return -1; } +static int casting_parser(char const *str, Py_ssize_t length, void *data) +{ + return casting_parser_full(str, length, data, 0); +} + /*NUMPY_API * Convert any Python object, *obj*, to an NPY_CASTING enum. */ @@ -969,10 +970,26 @@ PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) return string_converter_helper( obj, (void *)casting, casting_parser, "casting", "must be one of 'no', 'equiv', 'safe', " - "'same_kind', or 'unsafe'"); + "'same_kind', 'unsafe'"); + return 0; +} + +static int casting_parser_same_value(char const *str, Py_ssize_t length, void *data) +{ + return casting_parser_full(str, length, data, 1); +} + +NPY_NO_EXPORT int +PyArray_CastingConverterSameValue(PyObject *obj, NPY_CASTING *casting) +{ + return string_converter_helper( + obj, (void *)casting, casting_parser_same_value, "casting", + "must be one of 'no', 'equiv', 'safe', " + "'same_kind', 'unsafe', 'same_value'"); return 0; } + /***************************** * Other conversion functions *****************************/ @@ -1143,7 +1160,7 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) { PyObject *seq_obj = NULL; if (!PyLong_CheckExact(seq) && PySequence_Check(seq)) { - seq_obj = PySequence_Fast(seq, + seq_obj = PySequence_Fast(seq, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer"); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. */ diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index 8e0177616955..ccd883f2b0f4 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -335,11 +335,7 @@ NPY_NO_EXPORT PyObject * PyArray_ToString(PyArrayObject *self, NPY_ORDER order) { npy_intp numbytes; - npy_intp i; - char *dptr; - int elsize; PyObject *ret; - PyArrayIterObject *it; if (order == NPY_ANYORDER) order = PyArray_ISFORTRAN(self) ? 
NPY_FORTRANORDER : NPY_CORDER;
@@ -354,41 +350,65 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order)
     numbytes = PyArray_NBYTES(self);
     if ((PyArray_IS_C_CONTIGUOUS(self) && (order == NPY_CORDER)) ||
         (PyArray_IS_F_CONTIGUOUS(self) && (order == NPY_FORTRANORDER))) {
-        ret = PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes);
+        return PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes);
     }
-    else {
-        PyObject *new;
-        if (order == NPY_FORTRANORDER) {
-            /* iterators are always in C-order */
-            new = PyArray_Transpose(self, NULL);
-            if (new == NULL) {
-                return NULL;
-            }
+
+    /* Avoid Ravel where possible, to save a copy. */
+    if (!PyDataType_REFCHK(PyArray_DESCR(self)) &&
+            ((PyArray_DESCR(self)->flags & NPY_NEEDS_INIT) == 0)) {
+
+        /* Allocate the final bytes object */
+        ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes);
+        if (ret == NULL) {
+            return NULL;
         }
-        else {
-            Py_INCREF(self);
-            new = (PyObject *)self;
+
+        /* Writable buffer */
+        char* dest = PyBytes_AS_STRING(ret);
+
+        int flags = NPY_ARRAY_WRITEABLE;
+        if (order == NPY_FORTRANORDER) {
+            flags |= NPY_ARRAY_F_CONTIGUOUS;
         }
-        it = (PyArrayIterObject *)PyArray_IterNew(new);
-        Py_DECREF(new);
-        if (it == NULL) {
+
+        Py_INCREF(PyArray_DESCR(self));
+        /* Array view */
+        PyArrayObject *dest_array = (PyArrayObject *)PyArray_NewFromDescr(
+            &PyArray_Type,
+            PyArray_DESCR(self),
+            PyArray_NDIM(self),
+            PyArray_DIMS(self),
+            NULL,  // strides
+            dest,
+            flags,
+            NULL
+        );
+
+        if (dest_array == NULL) {
+            Py_DECREF(ret);
             return NULL;
         }
-        ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes);
-        if (ret == NULL) {
-            Py_DECREF(it);
+
+        /* Copy directly from source to destination with proper ordering */
+        if (PyArray_CopyInto(dest_array, self) < 0) {
+            Py_DECREF(dest_array);
+            Py_DECREF(ret);
             return NULL;
         }
-        dptr = PyBytes_AS_STRING(ret);
-        i = it->size;
-        elsize = PyArray_ITEMSIZE(self);
-        while (i--) {
-            memcpy(dptr, it->dataptr, elsize);
-            dptr += elsize;
-            PyArray_ITER_NEXT(it);
-        }
-        Py_DECREF(it);
+
+        Py_DECREF(dest_array);
+        return ret;
+
+    }
+
+    /* Non-contiguous path, or the dtype has references / needs init. */
+    PyArrayObject *contig = (PyArrayObject *)PyArray_Ravel(self, order);
+    if (contig == NULL) {
+        return NULL;
     }
+
+    ret = PyBytes_FromStringAndSize(PyArray_DATA(contig), numbytes);
+    Py_DECREF(contig);
     return ret;
 }
@@ -429,7 +449,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
     int retcode = raw_array_assign_scalar(
             PyArray_NDIM(arr), PyArray_DIMS(arr), descr,
             PyArray_BYTES(arr), PyArray_STRIDES(arr),
-            descr, (void *)value);
+            descr, (void *)value, NPY_UNSAFE_CASTING);

     if (PyDataType_REFCHK(descr)) {
         PyArray_ClearBuffer(descr, (void *)value, 0, 1, 1);
diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c
index 59b6298b5815..dbab8b4253d8 100644
--- a/numpy/_core/src/multiarray/convert_datatype.c
+++ b/numpy/_core/src/multiarray/convert_datatype.c
@@ -260,6 +260,10 @@ _get_castingimpl(PyObject *NPY_UNUSED(module), PyObject *args)
 * Supports the NPY_CAST_IS_VIEW check, and should be preferred to allow
 * extending cast-levels if necessary.
 * It is not valid for one of the arguments to be -1 to indicate an error.
+ * Preserve NPY_SAME_VALUE_CASTING_FLAG only when both casting levels carry
+ * it; in that case return max_casting | NPY_SAME_VALUE_CASTING_FLAG.
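+ * For example, combining (NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG)
+ * with plain NPY_SAME_KIND_CASTING yields NPY_SAME_KIND_CASTING (the flag
+ * is dropped because only one side carries it), while combining it with
+ * (NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG) yields
+ * NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG.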
+ * Usually this will be exactly NPY_SAME_VALUE_CASTING, but the logic here + * should handle other 'casting with same_value' options * * @param casting1 First (left-hand) casting level to compare * @param casting2 Second (right-hand) casting level to compare @@ -271,11 +275,14 @@ PyArray_MinCastSafety(NPY_CASTING casting1, NPY_CASTING casting2) if (casting1 < 0 || casting2 < 0) { return -1; } + int both_same_casting = casting1 & casting2 & NPY_SAME_VALUE_CASTING_FLAG; + casting1 &= ~NPY_SAME_VALUE_CASTING_FLAG; + casting2 &= ~NPY_SAME_VALUE_CASTING_FLAG; /* larger casting values are less safe */ if (casting1 > casting2) { - return casting1; + return casting1 | both_same_casting; } - return casting2; + return casting2 | both_same_casting; } @@ -344,7 +351,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) PyObject *cobj; key = PyLong_FromLong(type_num); - cobj = PyDict_GetItem(obj, key); + cobj = PyDict_GetItem(obj, key); // noqa: borrowed-ref OK Py_DECREF(key); if (cobj && PyCapsule_CheckExact(cobj)) { castfunc = PyCapsule_GetPointer(cobj, NULL); @@ -746,13 +753,13 @@ can_cast_pyscalar_scalar_to( } else if (PyDataType_ISFLOAT(to)) { if (flags & NPY_ARRAY_WAS_PYTHON_COMPLEX) { - return casting == NPY_UNSAFE_CASTING; + return ((casting == NPY_UNSAFE_CASTING) || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)); } return 1; } else if (PyDataType_ISINTEGER(to)) { if (!(flags & NPY_ARRAY_WAS_PYTHON_INT)) { - return casting == NPY_UNSAFE_CASTING; + return ((casting == NPY_UNSAFE_CASTING) || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)); } return 1; } @@ -828,7 +835,7 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, NPY_NO_EXPORT const char * npy_casting_to_string(NPY_CASTING casting) { - switch (casting) { + switch ((int)casting) { case NPY_NO_CASTING: return "'no'"; case NPY_EQUIV_CASTING: @@ -839,6 +846,16 @@ npy_casting_to_string(NPY_CASTING casting) return "'same_kind'"; case NPY_UNSAFE_CASTING: return "'unsafe'"; + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'no and same_value'"; + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'equiv and same_value'"; + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'safe and same_value'"; + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'same_kind and same_value'"; + case NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'same_value'"; default: return ""; } @@ -2116,9 +2133,9 @@ legacy_same_dtype_resolve_descriptors( if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) == PyDataType_ISNOTSWAPPED(loop_descrs[1])) { *view_offset = 0; - return NPY_NO_CASTING; + return NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG; } - return NPY_EQUIV_CASTING; + return NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG; } @@ -2305,6 +2322,7 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) if (dtypes[0]->singleton->kind == dtypes[1]->singleton->kind && from_itemsize == to_itemsize) { spec.casting = NPY_EQUIV_CASTING; + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; /* When there is no casting (equivalent C-types) use byteswap loops */ slots[0].slot = NPY_METH_resolve_descriptors; @@ -2319,13 +2337,17 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) } else if (_npy_can_cast_safely_table[from->type_num][to->type_num]) { spec.casting = NPY_SAFE_CASTING; - } - else if (dtype_kind_to_ordering(dtypes[0]->singleton->kind) <= - dtype_kind_to_ordering(dtypes[1]->singleton->kind)) { - spec.casting = NPY_SAME_KIND_CASTING; + spec.casting |= 
NPY_SAME_VALUE_CASTING_FLAG; } else { - spec.casting = NPY_UNSAFE_CASTING; + if (dtype_kind_to_ordering(dtypes[0]->singleton->kind) <= + dtype_kind_to_ordering(dtypes[1]->singleton->kind)) { + spec.casting = NPY_SAME_KIND_CASTING; + } + else { + spec.casting = NPY_UNSAFE_CASTING; + } + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; } /* Create a bound method, unbind and store it */ @@ -2463,10 +2485,10 @@ cast_to_string_resolve_descriptors( return -1; } - if (self->casting == NPY_UNSAFE_CASTING) { + if ((self->casting == NPY_UNSAFE_CASTING) || ((self->casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)){ assert(dtypes[0]->type_num == NPY_UNICODE && dtypes[1]->type_num == NPY_STRING); - return NPY_UNSAFE_CASTING; + return self->casting; } if (loop_descrs[1]->elsize >= size) { @@ -2749,7 +2771,7 @@ nonstructured_to_structured_resolve_descriptors( Py_ssize_t pos = 0; PyObject *key, *tuple; - while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { + while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { // noqa: borrowed-ref OK PyArray_Descr *field_descr = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); npy_intp field_view_off = NPY_MIN_INTP; NPY_CASTING field_casting = PyArray_GetCastInfo( @@ -2898,7 +2920,7 @@ structured_to_nonstructured_resolve_descriptors( return -1; } PyObject *key = PyTuple_GetItem(PyDataType_NAMES(given_descrs[0]), 0); - PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); + PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); // noqa: borrowed-ref OK base_descr = (PyArray_Descr *)PyTuple_GET_ITEM(base_tup, 0); struct_view_offset = PyLong_AsSsize_t(PyTuple_GET_ITEM(base_tup, 1)); if (error_converting(struct_view_offset)) { @@ -3033,7 +3055,7 @@ can_cast_fields_safety( for (Py_ssize_t i = 0; i < field_count; i++) { npy_intp field_view_off = NPY_MIN_INTP; PyObject *from_key = PyTuple_GET_ITEM(PyDataType_NAMES(from), i); - PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); + PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); // noqa: borrowed-ref OK if (from_tup == NULL) { return give_bad_field_error(from_key); } @@ -3041,7 +3063,7 @@ can_cast_fields_safety( /* Check whether the field names match */ PyObject *to_key = PyTuple_GET_ITEM(PyDataType_NAMES(to), i); - PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); + PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); // noqa: borrowed-ref OK if (to_tup == NULL) { return give_bad_field_error(from_key); } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index f7efe5041ab3..3a43e1bd983b 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -992,15 +992,16 @@ PyArray_NewFromDescr( int nd, npy_intp const *dims, npy_intp const *strides, void *data, int flags, PyObject *obj) { - if (subtype == NULL) { + if (descr == NULL) { PyErr_SetString(PyExc_ValueError, - "subtype is NULL in PyArray_NewFromDescr"); + "descr is NULL in PyArray_NewFromDescr"); return NULL; } - if (descr == NULL) { + if (subtype == NULL) { PyErr_SetString(PyExc_ValueError, - "descr is NULL in PyArray_NewFromDescr"); + "subtype is NULL in PyArray_NewFromDescr"); + Py_DECREF(descr); return NULL; } @@ -1304,12 +1305,12 @@ _array_from_buffer_3118(PyObject *memoryview) return NULL; } - if (PyErr_Warn( + if (PyErr_WarnEx( PyExc_RuntimeWarning, "A builtin ctypes object gave a PEP3118 format " "string that does not match its itemsize, so a " "best-guess will be made 
of the data type. " - "Newer versions of python may behave correctly.") < 0) { + "Newer versions of python may behave correctly.", 1) < 0) { Py_DECREF(descr); return NULL; } @@ -1508,6 +1509,16 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, return NULL; } + /* + * The internal implementation treats 0 as actually wanting a zero-dimensional + * array, but the API for this function has typically treated it as + * "anything is fine", so convert here. + * TODO: should we use another value as a placeholder instead? + */ + if (max_depth == 0 || max_depth > NPY_MAXDIMS) { + max_depth = NPY_MAXDIMS; + } + int was_scalar; PyObject* ret = PyArray_FromAny_int( op, dt_info.descr, dt_info.dtype, @@ -1539,7 +1550,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * This is the main code to make a NumPy array from a Python * Object. It is called from many different places. */ - PyArrayObject *arr = NULL, *ret; + PyArrayObject *arr = NULL, *ret = NULL; PyArray_Descr *dtype = NULL; coercion_cache_obj *cache = NULL; int ndim = 0; @@ -1560,12 +1571,14 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, copy = 1; } + Py_BEGIN_CRITICAL_SECTION(op); + ndim = PyArray_DiscoverDTypeAndShape( - op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, + op, max_depth, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied_by__array__); if (ndim < 0) { - return NULL; + goto cleanup; } /* If the cache is NULL, then the object is considered a scalar */ @@ -1578,16 +1591,14 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (min_depth != 0 && ndim < min_depth) { PyErr_SetString(PyExc_ValueError, "object of too small depth for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } - if (max_depth != 0 && ndim > max_depth) { + if (ndim > max_depth) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Got the correct parameters, but the cache may already hold the result */ @@ -1602,9 +1613,11 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (was_copied_by__array__ == 1) { flags = flags & ~NPY_ARRAY_ENSURECOPY; } - PyObject *res = PyArray_FromArray(arr, dtype, flags); + // PyArray_FromArray steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromArray(arr, dtype, flags); npy_unlink_coercion_cache(cache); - return res; + goto cleanup; } else if (cache == NULL && PyArray_IsScalar(op, Void) && !(((PyVoidScalarObject *)op)->flags & NPY_ARRAY_OWNDATA) && @@ -1619,13 +1632,15 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * provide a dtype (newtype is NULL). 
*/ assert(ndim == 0); - - return PyArray_NewFromDescrAndBase( + // PyArray_NewFromDescrAndBase steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, dtype, 0, NULL, NULL, ((PyVoidScalarObject *)op)->obval, ((PyVoidScalarObject *)op)->flags, NULL, op); + goto cleanup; } /* * If we got this far, we definitely have to create a copy, since we are @@ -1633,9 +1648,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, */ if (flags & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } if (cache == NULL && in_descr != NULL && @@ -1662,16 +1676,18 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * have a better solution at some point): * https://github.com/pandas-dev/pandas/issues/35481 */ - return PyArray_FromScalar(op, dtype); + // PyArray_FromScalar steals a reference to dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromScalar(op, dtype); + goto cleanup; } /* There was no array (or array-like) passed in directly. */ if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { PyErr_SetString(PyExc_TypeError, "WRITEBACKIFCOPY used for non-array input."); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Create a new array and copy the data */ @@ -1681,8 +1697,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, flags&NPY_ARRAY_F_CONTIGUOUS, NULL); if (ret == NULL) { npy_free_coercion_cache(cache); - Py_DECREF(dtype); - return NULL; + goto cleanup; } if (ndim == PyArray_NDIM(ret)) { /* @@ -1699,12 +1714,10 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, assert(ndim == 0); if (PyArray_Pack(dtype, PyArray_BYTES(ret), op) < 0) { - Py_DECREF(dtype); - Py_DECREF(ret); - return NULL; + Py_CLEAR(ret); + goto cleanup; } - Py_DECREF(dtype); - return (PyObject *)ret; + goto cleanup; } assert(ndim != 0); assert(op == cache->converted_obj); @@ -1717,15 +1730,18 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, ((PyArrayObject_fields *)ret)->descr = dtype; } - int success = PyArray_AssignFromCache(ret, cache); + int succeed = PyArray_AssignFromCache(ret, cache); ((PyArrayObject_fields *)ret)->nd = out_ndim; ((PyArrayObject_fields *)ret)->descr = out_descr; - Py_DECREF(dtype); - if (success < 0) { - Py_DECREF(ret); - return NULL; + if (succeed < 0) { + Py_CLEAR(ret); } + +cleanup:; + + Py_XDECREF(dtype); + Py_END_CRITICAL_SECTION(); return (PyObject *)ret; } @@ -1778,7 +1794,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, */ NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context) + int max_depth, int requirements, PyObject *context) { npy_dtype_info dt_info = {NULL, NULL}; @@ -1793,8 +1809,13 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, return NULL; } + /* See comment in PyArray_FromAny for rationale */ + if (max_depth == 0 || max_depth > NPY_MAXDIMS) { + max_depth = NPY_MAXDIMS; + } + PyObject* ret = PyArray_CheckFromAny_int( - op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requires, + op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requirements, context); Py_XDECREF(dt_info.descr); @@ -1809,11 +1830,11 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta 
*in_DType, int min_depth,
-                  int max_depth, int requires, PyObject *context)
+                  int max_depth, int requirements, PyObject *context)
 {
     PyObject *obj;
     Py_XINCREF(in_descr);  /* take ownership as we may replace it */
-    if (requires & NPY_ARRAY_NOTSWAPPED) {
+    if (requirements & NPY_ARRAY_NOTSWAPPED) {
         if (!in_descr && PyArray_Check(op)) {
             in_descr = PyArray_DESCR((PyArrayObject *)op);
             Py_INCREF(in_descr);
@@ -1828,16 +1849,16 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr,

     int was_scalar;
     obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth,
-                              max_depth, requires, context, &was_scalar);
+                              max_depth, requirements, context, &was_scalar);
     Py_XDECREF(in_descr);
     if (obj == NULL) {
         return NULL;
     }

-    if ((requires & NPY_ARRAY_ELEMENTSTRIDES)
+    if ((requirements & NPY_ARRAY_ELEMENTSTRIDES)
             && !PyArray_ElementStrides(obj)) {
         PyObject *ret;
-        if (requires & NPY_ARRAY_ENSURENOCOPY) {
+        if (requirements & NPY_ARRAY_ENSURENOCOPY) {
             PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg);
             return NULL;
         }
@@ -2114,7 +2135,7 @@ _is_default_descr(PyObject *descr, PyObject *typestr) {
     if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) {
         return 0;
     }
-    PyObject *tuple = PyList_GET_ITEM(descr, 0);
+    PyObject *tuple = PyList_GET_ITEM(descr, 0);  // noqa: borrowed-ref - manual fix needed
     if (!(PyTuple_Check(tuple) && PyTuple_GET_SIZE(tuple) == 2)) {
         return 0;
     }
@@ -2141,6 +2162,7 @@ PyArray_FromInterface(PyObject *origin)
     Py_ssize_t i, n;
     npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS];
     int dataflags = NPY_ARRAY_BEHAVED;
+    int use_scalar_assign = 0;

     if (PyArray_LookupSpecial_OnInstance(
             origin, npy_interned_str.array_interface, &iface) < 0) {
@@ -2229,12 +2251,10 @@ PyArray_FromInterface(PyObject *origin)
         /* Shape must be specified when 'data' is specified */
         int result = PyDict_ContainsString(iface, "data");
         if (result < 0) {
-            Py_DECREF(attr);
             return NULL;
         }
         else if (result == 1) {
             Py_DECREF(iface);
-            Py_DECREF(attr);
             PyErr_SetString(PyExc_ValueError,
                     "Missing __array_interface__ shape");
             return NULL;
@@ -2277,7 +2297,10 @@ PyArray_FromInterface(PyObject *origin)
     }

     /* Case for data access through pointer */
-    if (attr && PyTuple_Check(attr)) {
+    if (attr == NULL) {
+        use_scalar_assign = 1;
+    }
+    else if (PyTuple_Check(attr)) {
         PyObject *dataptr;
         if (PyTuple_GET_SIZE(attr) != 2) {
             PyErr_SetString(PyExc_TypeError,
@@ -2309,7 +2332,7 @@ PyArray_FromInterface(PyObject *origin)
     }

     /* Case for data access through buffer */
-    else if (attr) {
+    else {
         if (attr != Py_None) {
             base = attr;
         }
@@ -2366,18 +2389,32 @@ PyArray_FromInterface(PyObject *origin)
     if (ret == NULL) {
         goto fail;
     }
-    if (data == NULL) {
+    if (use_scalar_assign) {
+        /*
+         * NOTE(seberg): I honestly doubt anyone is using this scalar path and
+         * we could probably just deprecate it (or remove it in a 3.0 version).
+         */
         if (PyArray_SIZE(ret) > 1) {
             PyErr_SetString(PyExc_ValueError,
                             "cannot coerce scalar to array with size > 1");
             Py_DECREF(ret);
             goto fail;
         }
-        if (PyArray_SETITEM(ret, PyArray_DATA(ret), origin) < 0) {
+        if (PyArray_Pack(PyArray_DESCR(ret), PyArray_DATA(ret), origin) < 0) {
             Py_DECREF(ret);
             goto fail;
         }
     }
+    else if (data == NULL && PyArray_NBYTES(ret) != 0) {
+        /* Caller should ensure this, but <2.4 used the above scalar coercion path */
+        PyErr_SetString(PyExc_ValueError,
+                "data is NULL but array contains data; in older versions of NumPy "
+                "this may have used the scalar path. 
To get the scalar path " + "you must leave the data field undefined."); + Py_DECREF(ret); + goto fail; + } + result = PyDict_GetItemStringRef(iface, "strides", &attr); if (result == -1){ return NULL; @@ -2794,7 +2831,6 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) count = (src_count < dst_count) ? src_count : dst_count; if (cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata) < 0) { - res = -1; break; } @@ -3696,21 +3732,20 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char *sep) } if (((npy_intp) nread) < num) { /* - * Realloc memory for smaller number of elements, use original dtype - * which may have include a subarray (and is used for `nread`). + * Resize array to smaller number of elements. Note that original + * dtype may have included a subarray, so we may not be 1-d. */ - const size_t nsize = PyArray_MAX(nread,1) * dtype->elsize; - char *tmp; - - /* The handler is always valid */ - if((tmp = PyDataMem_UserRENEW(PyArray_DATA(ret), nsize, - PyArray_HANDLER(ret))) == NULL) { + npy_intp dims[NPY_MAXDIMS]; + dims[0] = (npy_intp)nread; + for (int i = 1; i < PyArray_NDIM(ret); i++) { + dims[i] = PyArray_DIMS(ret)[i]; + } + PyArray_Dims new_dims = {dims, PyArray_NDIM(ret)}; + if (PyArray_Resize_int(ret, &new_dims, 0) < 0) { Py_DECREF(dtype); Py_DECREF(ret); - return PyErr_NoMemory(); + return NULL; } - ((PyArrayObject_fields *)ret)->data = tmp; - PyArray_DIMS(ret)[0] = nread; } Py_DECREF(dtype); return (PyObject *)ret; @@ -3962,6 +3997,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) PyObject *iter = NULL; PyArrayObject *ret = NULL; npy_intp i, elsize, elcount; + npy_intp dims[NPY_MAXDIMS]; if (dtype == NULL) { return NULL; @@ -4001,6 +4037,9 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) if (ret == NULL) { goto done; } + /* set up for possible resizing */ + memcpy(dims, PyArray_DIMS(ret), PyArray_NDIM(ret)*sizeof(npy_intp)); + PyArray_Dims new_dims = {dims, PyArray_NDIM(ret)}; char *item = PyArray_BYTES(ret); for (i = 0; i < count || count == -1; i++, item += elsize) { @@ -4008,14 +4047,12 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) if (value == NULL) { if (PyErr_Occurred()) { /* Fetching next item failed perhaps due to exhausting iterator */ - goto done; + goto fail; } break; } if (NPY_UNLIKELY(i >= elcount) && elsize != 0) { - char *new_data = NULL; - npy_intp nbytes; /* Grow PyArray_DATA(ret): this is similar for the strategy for PyListObject, but we use @@ -4024,31 +4061,18 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) be suitable to reuse here. */ elcount = (i >> 1) + (i < 4 ? 
4 : 2) + i; - if (!npy_mul_sizes_with_overflow(&nbytes, elcount, elsize)) { - /* The handler is always valid */ - new_data = PyDataMem_UserRENEW( - PyArray_BYTES(ret), nbytes, PyArray_HANDLER(ret)); - } - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); + dims[0] = elcount; + if (PyArray_Resize_int(ret, &new_dims, 0) < 0) { Py_DECREF(value); - goto done; + goto fail; } - ((PyArrayObject_fields *)ret)->data = new_data; - /* resize array for cleanup: */ - PyArray_DIMS(ret)[0] = elcount; /* Reset `item` pointer to point into realloc'd chunk */ - item = new_data + i * elsize; - if (PyDataType_FLAGCHK(dtype, NPY_NEEDS_INIT)) { - /* Initialize new chunk: */ - memset(item, 0, nbytes - i * elsize); - } + item = ((char *)PyArray_DATA(ret)) + i * elsize; } if (PyArray_Pack(dtype, item, value) < 0) { Py_DECREF(value); - goto done; + goto fail; } Py_DECREF(value); } @@ -4057,46 +4081,22 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) PyErr_Format(PyExc_ValueError, "iterator too short: Expected %zd but iterator had only %zd " "items.", (Py_ssize_t)count, (Py_ssize_t)i); - goto done; + goto fail; } /* * Realloc the data so that don't keep extra memory tied up and fix * the arrays first dimension (there could be more than one). */ - if (i == 0 || elsize == 0) { - /* The size cannot be zero for realloc. */ + dims[0] = i; + if (!PyArray_Resize_int(ret, &new_dims, 0)) { + goto done; } - else { - /* Resize array to actual final size (it may be too large) */ - /* The handler is always valid */ - char *new_data = PyDataMem_UserRENEW( - PyArray_DATA(ret), i * elsize, PyArray_HANDLER(ret)); - - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); - goto done; - } - ((PyArrayObject_fields *)ret)->data = new_data; - if (count < 0) { - /* - * If the count was smaller than zero, the strides may be all 0 - * (even in the later dimensions for `count < 0`! - * Thus, fix all strides here again for C-contiguity. 
- */ - int oflags; - _array_fill_strides( - PyArray_STRIDES(ret), PyArray_DIMS(ret), PyArray_NDIM(ret), - PyArray_ITEMSIZE(ret), NPY_ARRAY_C_CONTIGUOUS, &oflags); - PyArray_STRIDES(ret)[0] = elsize; - assert(oflags & NPY_ARRAY_C_CONTIGUOUS); - } - } - PyArray_DIMS(ret)[0] = i; + fail: + Py_CLEAR(ret); - done: + done: Py_XDECREF(iter); Py_XDECREF(dtype); if (PyErr_Occurred()) { diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index 094589968b66..b7a60e0065e0 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -68,11 +68,11 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requires, PyObject *context); + int max_depth, int requirements, PyObject *context); NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context); + int max_depth, int requirements, PyObject *context); NPY_NO_EXPORT PyObject * PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 9c024dbcd91c..9489e2b92c6a 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -1233,6 +1233,10 @@ can_cast_datetime64_units(NPY_DATETIMEUNIT src_unit, NPY_DATETIMEUNIT dst_unit, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* TODO: support this */ + return 0; + } switch (casting) { /* Allow anything with unsafe casting */ case NPY_UNSAFE_CASTING: @@ -1278,6 +1282,10 @@ can_cast_timedelta64_units(NPY_DATETIMEUNIT src_unit, NPY_DATETIMEUNIT dst_unit, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Use SAFE_CASTING, which implies SAME_VALUE */ + casting = NPY_SAFE_CASTING; + } switch (casting) { /* Allow anything with unsafe casting */ case NPY_UNSAFE_CASTING: @@ -1325,6 +1333,10 @@ can_cast_datetime64_metadata(PyArray_DatetimeMetaData *src_meta, PyArray_DatetimeMetaData *dst_meta, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Force SAFE_CASTING */ + casting = NPY_SAFE_CASTING; + } switch (casting) { case NPY_UNSAFE_CASTING: return 1; @@ -1352,6 +1364,10 @@ can_cast_timedelta64_metadata(PyArray_DatetimeMetaData *src_meta, PyArray_DatetimeMetaData *dst_meta, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Use SAFE_CASTING, which implies SAME_VALUE */ + casting = NPY_SAFE_CASTING; + } switch (casting) { case NPY_UNSAFE_CASTING: return 1; @@ -2245,8 +2261,8 @@ NpyDatetime_ConvertPyDateTimeToDatetimeStruct( } /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. 
*/ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) @@ -2255,14 +2271,14 @@ get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) npy_datetimestruct loc_dts; /* Create a Python datetime to give to the timezone object */ - dt = PyDateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, - dts->hour, dts->min, 0, 0); + dt = PyDateTimeAPI->DateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, + dts->hour, dts->min, 0, 0, PyDateTime_TimeZone_UTC, PyDateTimeAPI->DateTimeType); if (dt == NULL) { return -1; } /* Convert the datetime from UTC to local time */ - loc_dt = PyObject_CallMethod(timezone_obj, "fromutc", "O", dt); + loc_dt = PyObject_CallMethod(dt, "astimezone", "O", timezone_obj); Py_DECREF(dt); if (loc_dt == NULL) { return -1; @@ -2761,10 +2777,10 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as the string "NaT". - * For days or coarser, returns a datetime.date. - * For microseconds or coarser, returns a datetime.datetime. - * For units finer than microseconds, returns an integer. + * NaT (Not-a-time) is returned as None. + * For D/W/Y/M (days or coarser), returns a datetime.date. + * For µs/ms/s/m/h (microseconds up to hours), returns a datetime.datetime. + * For ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta) @@ -2945,9 +2961,9 @@ convert_timedelta_to_timedeltastruct(PyArray_DatetimeMetaData *meta, /* * Converts a timedelta into a PyObject *. * - * Not-a-time is returned as the string "NaT". - * For microseconds or coarser, returns a datetime.timedelta. - * For units finer than microseconds, returns an integer. + * NaT (Not-a-time) is returned as None. + * For µs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.timedelta. + * For Y/M (non-linear units), generic units, and ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) @@ -2963,7 +2979,7 @@ convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) /* * If the type's precision is greater than microseconds, is - * Y/M/B (nonlinear units), or is generic units, return an int + * Y/M (nonlinear units), or is generic units, return an int */ if (meta->base > NPY_FR_us || meta->base == NPY_FR_Y || @@ -3118,15 +3134,18 @@ cast_datetime_to_datetime(PyArray_DatetimeMetaData *src_meta, */ NPY_NO_EXPORT int cast_timedelta_to_timedelta(PyArray_DatetimeMetaData *src_meta, - PyArray_DatetimeMetaData *dst_meta, - npy_timedelta src_dt, - npy_timedelta *dst_dt) + PyArray_DatetimeMetaData *dst_meta, + npy_timedelta src_dt, + npy_timedelta *dst_dt) { npy_int64 num = 0, denom = 0; - /* If the metadata is the same, short-circuit the conversion */ - if (src_meta->base == dst_meta->base && - src_meta->num == dst_meta->num) { + /* + * If the metadata is the same or if src_dt is NAT, short-circuit + * the conversion. 
+ */ + if ((src_meta->base == dst_meta->base && src_meta->num == dst_meta->num) + || src_dt == NPY_DATETIME_NAT) { *dst_dt = src_dt; return 0; } diff --git a/numpy/_core/src/multiarray/datetime_strings.c b/numpy/_core/src/multiarray/datetime_strings.c index f92eec3f5a59..97f24cfe821e 100644 --- a/numpy/_core/src/multiarray/datetime_strings.c +++ b/numpy/_core/src/multiarray/datetime_strings.c @@ -984,7 +984,7 @@ NpyDatetime_MakeISO8601Datetime( * the string representation, so ensure that the data * is being cast according to the casting rule. */ - if (casting != NPY_UNSAFE_CASTING) { + if ((casting != NPY_UNSAFE_CASTING) && ((casting & NPY_SAME_VALUE_CASTING_FLAG) == 0)) { /* Producing a date as a local time is always 'unsafe' */ if (base <= NPY_FR_D && local) { PyErr_SetString(PyExc_TypeError, "Cannot create a local " diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 5708e5c6ecb7..1fc5b76d1f00 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -15,6 +15,7 @@ #include "npy_config.h" #include "npy_ctypes.h" #include "npy_import.h" +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "_datetime.h" @@ -84,63 +85,54 @@ _try_convert_from_ctypes_type(PyTypeObject *type) } /* - * This function creates a dtype object when the object has a "dtype" attribute, - * and it can be converted to a dtype object. + * This function creates a dtype object when the object has a "__numpy_dtype__" + * or "dtype" attribute, which must be a valid NumPy dtype instance. * * Returns `Py_NotImplemented` if this is not possible. - * Currently the only failure mode for a NULL return is a RecursionError. */ static PyArray_Descr * _try_convert_from_dtype_attr(PyObject *obj) { + int used_dtype_attr = 0; /* For arbitrary objects that have a "dtype" attribute */ - PyObject *dtypedescr = PyObject_GetAttrString(obj, "dtype"); - if (dtypedescr == NULL) { + PyObject *attr; + int res = PyObject_GetOptionalAttr(obj, npy_interned_str.numpy_dtype, &attr); + if (res < 0) { + return NULL; + } + else if (res == 0) { /* - * This can be reached due to recursion limit being hit while fetching - * the attribute (tested for py3.7). This removes the custom message. + * When "__numpy_dtype__" does not exist, also check "dtype". This should + * be removed in the future. + * We do, however, support a weird `class myclass(np.void): dtype = ...` + * syntax. */ - goto fail; - } - - if (PyArray_DescrCheck(dtypedescr)) { - /* The dtype attribute is already a valid descriptor */ - return (PyArray_Descr *)dtypedescr; + used_dtype_attr = 1; + int res = PyObject_GetOptionalAttr(obj, npy_interned_str.dtype, &attr); + if (res < 0) { + return NULL; + } + else if (res == 0) { + Py_INCREF(Py_NotImplemented); + return (PyArray_Descr *)Py_NotImplemented; + } } - - if (Py_EnterRecursiveCall( - " while trying to convert the given data type from its " - "`.dtype` attribute.") != 0) { - Py_DECREF(dtypedescr); + if (!PyArray_DescrCheck(attr)) { + if (PyType_Check(obj) && PyObject_HasAttrString(attr, "__get__")) { + /* If the object has a __get__, assume this is a class property. */ + Py_DECREF(attr); + Py_INCREF(Py_NotImplemented); + return (PyArray_Descr *)Py_NotImplemented; + } + PyErr_Format(PyExc_ValueError, + "Could not convert %R to a NumPy dtype (via `.%S` value %R).", obj, + used_dtype_attr ? 
npy_interned_str.dtype : npy_interned_str.numpy_dtype, + attr); + Py_DECREF(attr); return NULL; } - - PyArray_Descr *newdescr = _convert_from_any(dtypedescr, 0); - Py_DECREF(dtypedescr); - Py_LeaveRecursiveCall(); - if (newdescr == NULL) { - goto fail; - } - - Py_DECREF(newdescr); - PyErr_SetString(PyExc_ValueError, "dtype attribute is not a valid dtype instance"); - return NULL; - - fail: - /* Ignore all but recursion errors, to give ctypes a full try. */ - if (!PyErr_ExceptionMatches(PyExc_RecursionError)) { - PyErr_Clear(); - Py_INCREF(Py_NotImplemented); - return (PyArray_Descr *)Py_NotImplemented; - } - return NULL; -} - -/* Expose to another file with a prefixed name */ -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_try_convert_from_dtype_attr(PyObject *obj) -{ - return _try_convert_from_dtype_attr(obj); + /* The dtype attribute is already a valid descriptor */ + return (PyArray_Descr *)attr; } /* @@ -424,7 +416,7 @@ _convert_from_array_descr(PyObject *obj, int align) return NULL; } for (int i = 0; i < n; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2)) { PyErr_Format(PyExc_TypeError, "Field elements must be 2- or 3-tuples, got '%R'", @@ -507,10 +499,10 @@ _convert_from_array_descr(PyObject *obj, int align) "StringDType is not currently supported for structured dtype fields."); goto fail; } - if ((PyDict_GetItemWithError(fields, name) != NULL) + if ((PyDict_GetItemWithError(fields, name) != NULL) // noqa: borrowed-ref OK || (title && PyUnicode_Check(title) - && (PyDict_GetItemWithError(fields, title) != NULL))) { + && (PyDict_GetItemWithError(fields, title) != NULL))) { // noqa: borrowed-ref OK PyErr_Format(PyExc_ValueError, "field %R occurs more than once", name); Py_DECREF(conv); @@ -548,7 +540,7 @@ _convert_from_array_descr(PyObject *obj, int align) goto fail; } if (PyUnicode_Check(title)) { - PyObject *existing = PyDict_GetItemWithError(fields, title); + PyObject *existing = PyDict_GetItemWithError(fields, title); // noqa: borrowed-ref OK if (existing == NULL && PyErr_Occurred()) { goto fail; } @@ -613,7 +605,7 @@ _convert_from_list(PyObject *obj, int align) * Ignore any empty string at end which _internal._commastring * can produce */ - PyObject *last_item = PyList_GET_ITEM(obj, n-1); + PyObject *last_item = PyList_GET_ITEM(obj, n-1); // noqa: borrowed-ref OK if (PyUnicode_Check(last_item)) { Py_ssize_t s = PySequence_Size(last_item); if (s < 0) { @@ -643,7 +635,7 @@ _convert_from_list(PyObject *obj, int align) int totalsize = 0; for (int i = 0; i < n; i++) { PyArray_Descr *conv = _convert_from_any( - PyList_GET_ITEM(obj, i), align); + PyList_GET_ITEM(obj, i), align); // noqa: borrowed-ref OK if (conv == NULL) { goto fail; } @@ -794,7 +786,7 @@ _validate_union_object_dtype(_PyArray_LegacyDescr *new, _PyArray_LegacyDescr *co if (name == NULL) { return -1; } - tup = PyDict_GetItemWithError(conv->fields, name); + tup = PyDict_GetItemWithError(conv->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -940,7 +932,7 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItemWithError(fields, key); + tup = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -960,7 +952,7 @@ 
_validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItemWithError(fields, key); + tup = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -1029,17 +1021,13 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) static PyArray_Descr * _convert_from_field_dict(PyObject *obj, int align) { - PyObject *_numpy_internal; - PyArray_Descr *res; - - _numpy_internal = PyImport_ImportModule("numpy._core._internal"); - if (_numpy_internal == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_usefields", &npy_runtime_imports._usefields) < 0) { return NULL; } - res = (PyArray_Descr *)PyObject_CallMethod(_numpy_internal, - "_usefields", "Oi", obj, align); - Py_DECREF(_numpy_internal); - return res; + + return (PyArray_Descr *)PyObject_CallFunctionObjArgs( + npy_runtime_imports._usefields, obj, align ? Py_True : Py_False, NULL); } /* @@ -1217,7 +1205,7 @@ _convert_from_dict(PyObject *obj, int align) } /* Insert into dictionary */ - if (PyDict_GetItemWithError(fields, name) != NULL) { + if (PyDict_GetItemWithError(fields, name) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "name already used as a name or title"); Py_DECREF(tup); @@ -1236,7 +1224,7 @@ _convert_from_dict(PyObject *obj, int align) } if (len == 3) { if (PyUnicode_Check(title)) { - if (PyDict_GetItemWithError(fields, title) != NULL) { + if (PyDict_GetItemWithError(fields, title) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); Py_DECREF(tup); @@ -1899,7 +1887,7 @@ _convert_from_str(PyObject *obj, int align) if (typeDict == NULL) { goto fail; } - PyObject *item = PyDict_GetItemWithError(typeDict, obj); + PyObject *item = PyDict_GetItemWithError(typeDict, obj); // noqa: borrowed-ref - manual fix needed if (item == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2095,7 +2083,7 @@ static PyMemberDef arraydescr_members[] = { {"alignment", T_PYSSIZET, offsetof(PyArray_Descr, alignment), READONLY, NULL}, {"flags", -#if NPY_ULONGLONG == NPY_UINT64 +#if NPY_SIZEOF_LONGLONG == 8 T_ULONGLONG, offsetof(PyArray_Descr, flags), READONLY, NULL}, #else #error Assuming long long is 64bit, if not replace with getter function. 
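A note on the `_convert_from_field_dict` hunk above: it replaces a fresh `PyImport_ImportModule` on every call with NumPy's cached runtime import, and, because `PyObject_CallFunctionObjArgs` only accepts `PyObject *` arguments, the `align` int is now passed as `Py_True`/`Py_False`. A minimal stand-alone sketch of the same caching pattern using only the public CPython API (the `get_usefields` helper here is hypothetical and not part of this patch):

#include <Python.h>

/* Hypothetical sketch of the cached-import pattern: resolve
 * numpy._core._internal._usefields once, then reuse it on later calls. */
static PyObject *
get_usefields(void)
{
    /* Not thread-safe as written; NumPy's runtime-import cache handles that. */
    static PyObject *cached = NULL;
    if (cached == NULL) {
        PyObject *mod = PyImport_ImportModule("numpy._core._internal");
        if (mod == NULL) {
            return NULL;
        }
        cached = PyObject_GetAttrString(mod, "_usefields");
        Py_DECREF(mod);
    }
    return cached;  /* reference is owned by the cache; callers must not decref */
}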
@@ -2280,7 +2268,7 @@ _arraydescr_isnative(PyArray_Descr *self) PyArray_Descr *new; int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2426,7 +2414,7 @@ arraydescr_names_set( int ret; key = PyTuple_GET_ITEM(self->names, i); /* Borrowed references to item and new_key */ - item = PyDict_GetItemWithError(self->fields, key); + item = PyDict_GetItemWithError(self->fields, key); // noqa: borrowed-ref OK if (item == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -2554,7 +2542,9 @@ arraydescr_new(PyTypeObject *subtype, return NULL; } - PyObject *odescr, *metadata=NULL; + PyObject *odescr; + PyObject *oalign = NULL; + PyObject *metadata = NULL; PyArray_Descr *conv; npy_bool align = NPY_FALSE; npy_bool copy = NPY_FALSE; @@ -2562,14 +2552,33 @@ arraydescr_new(PyTypeObject *subtype, static char *kwlist[] = {"dtype", "align", "copy", "metadata", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O!:dtype", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O!:dtype", kwlist, &odescr, - PyArray_BoolConverter, &align, + &oalign, PyArray_BoolConverter, &copy, &PyDict_Type, &metadata)) { return NULL; } + if (oalign != NULL) { + /* + * In the future, reject non-Python (or NumPy) booleans, including integers, to avoid any + * possibility of thinking that an integer alignment makes sense here. + */ + if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool)) { + /* Deprecated 2025-07-01: NumPy 2.4 */ + if (PyErr_WarnFormat(npy_static_pydata.VisibleDeprecationWarning, 1, + "dtype(): align should be passed as a Python or NumPy boolean but got `align=%.100R`. " + "Did you mean to pass a tuple to create a subarray type? 
(Deprecated NumPy 2.4)", + oalign) < 0) { + return NULL; + } + } + if (!PyArray_BoolConverter(oalign, &align)) { + return NULL; + } + } + conv = _convert_from_any(odescr, align); if (conv == NULL) { return NULL; @@ -2654,8 +2663,10 @@ _get_pickleabletype_from_datetime_metadata(PyArray_Descr *dtype) if (dtype->metadata != NULL) { Py_INCREF(dtype->metadata); PyTuple_SET_ITEM(ret, 0, dtype->metadata); - } else { - PyTuple_SET_ITEM(ret, 0, PyDict_New()); + } + else { + PyTuple_SET_ITEM(ret, 0, Py_None); + Py_INCREF(Py_None); } /* Convert the datetime metadata into a tuple */ @@ -2831,7 +2842,7 @@ _descr_find_object(PyArray_Descr *self) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2855,13 +2866,13 @@ _descr_find_object(PyArray_Descr *self) static PyObject * arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) { - int elsize = -1, alignment = -1; + Py_ssize_t elsize = -1, alignment = -1; int version = 4; char endian; PyObject *endian_obj; PyObject *subarray, *fields, *names = NULL, *metadata=NULL; int incref_names = 1; - int int_dtypeflags = 0; + npy_int64 signed_dtypeflags = 0; npy_uint64 dtypeflags; if (!PyDataType_ISLEGACY(self)) { @@ -2880,24 +2891,24 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { case 9: - if (!PyArg_ParseTuple(args, "(iOOOOiiiO):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnnkO):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags, &metadata)) { + &alignment, &signed_dtypeflags, &metadata)) { PyErr_Clear(); return NULL; } break; case 8: - if (!PyArg_ParseTuple(args, "(iOOOOiii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnnk):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags)) { + &alignment, &signed_dtypeflags)) { return NULL; } break; case 7: - if (!PyArg_ParseTuple(args, "(iOOOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnn):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, &alignment)) { @@ -2905,7 +2916,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } break; case 6: - if (!PyArg_ParseTuple(args, "(iOOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOnn):__setstate__", &version, &endian_obj, &subarray, &fields, &elsize, &alignment)) { @@ -2914,7 +2925,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) break; case 5: version = 0; - if (!PyArg_ParseTuple(args, "(OOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(OOOnn):__setstate__", &endian_obj, &subarray, &fields, &elsize, &alignment)) { return NULL; @@ -2947,7 +2958,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) if (fields != Py_None) { PyObject *key, *list; key = PyLong_FromLong(-1); - list = PyDict_GetItemWithError(fields, key); + list = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (!list) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3123,7 +3134,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) for (i = 0; i < PyTuple_GET_SIZE(names); ++i) { name = PyTuple_GET_ITEM(names, i); - field = PyDict_GetItemWithError(fields, name); + field = PyDict_GetItemWithError(fields, name); // noqa: borrowed-ref OK if (!field) { if 
(!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3162,12 +3173,12 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) * flags as an int even though it actually was a char in the PyArray_Descr * structure */ - if (int_dtypeflags < 0 && int_dtypeflags >= -128) { + if (signed_dtypeflags < 0 && signed_dtypeflags >= -128) { /* NumPy used to use a char. So normalize if signed. */ - int_dtypeflags += 128; + signed_dtypeflags += 128; } - dtypeflags = int_dtypeflags; - if (dtypeflags != int_dtypeflags) { + dtypeflags = (npy_uint64)signed_dtypeflags; + if (dtypeflags != signed_dtypeflags) { PyErr_Format(PyExc_ValueError, "incorrect value for flags variable (overflow)"); return NULL; @@ -3180,16 +3191,8 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) self->flags = _descr_find_object((PyArray_Descr *)self); } - /* - * We have a borrowed reference to metadata so no need - * to alter reference count when throwing away Py_None. - */ - if (metadata == Py_None) { - metadata = NULL; - } - - if (PyDataType_ISDATETIME(self) && (metadata != NULL)) { - PyObject *old_metadata; + PyObject *old_metadata, *new_metadata; + if (PyDataType_ISDATETIME(self)) { PyArray_DatetimeMetaData temp_dt_data; if ((! PyTuple_Check(metadata)) || (PyTuple_Size(metadata) != 2)) { @@ -3206,20 +3209,26 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) return NULL; } - old_metadata = self->metadata; - self->metadata = PyTuple_GET_ITEM(metadata, 0); + new_metadata = PyTuple_GET_ITEM(metadata, 0); memcpy((char *) &((PyArray_DatetimeDTypeMetaData *)self->c_metadata)->meta, - (char *) &temp_dt_data, - sizeof(PyArray_DatetimeMetaData)); - Py_XINCREF(self->metadata); - Py_XDECREF(old_metadata); + (char *) &temp_dt_data, + sizeof(PyArray_DatetimeMetaData)); } else { - PyObject *old_metadata = self->metadata; - self->metadata = metadata; - Py_XINCREF(self->metadata); - Py_XDECREF(old_metadata); + new_metadata = metadata; + } + + old_metadata = self->metadata; + /* + * We have a borrowed reference to metadata so no need + * to alter reference count when throwing away Py_None. 
+ */ + if (new_metadata == Py_None) { + new_metadata = NULL; } + self->metadata = new_metadata; + Py_XINCREF(new_metadata); + Py_XDECREF(old_metadata); Py_RETURN_NONE; } @@ -3327,7 +3336,7 @@ PyArray_DescrNewByteorder(PyArray_Descr *oself, char newendian) return NULL; } /* make new dictionary with replaced PyArray_Descr Objects */ - while (PyDict_Next(self->fields, &pos, &key, &value)) { + while (PyDict_Next(self->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -3453,7 +3462,7 @@ is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype) if (key == NULL) { return 0; } - tup = PyDict_GetItem(fields, key); + tup = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { return 0; } @@ -3618,7 +3627,7 @@ _check_has_fields(PyArray_Descr *self) static PyObject * _subscript_by_name(_PyArray_LegacyDescr *self, PyObject *op) { - PyObject *obj = PyDict_GetItemWithError(self->fields, op); + PyObject *obj = PyDict_GetItemWithError(self->fields, op); // noqa: borrowed-ref OK if (obj == NULL) { if (!PyErr_Occurred()) { PyErr_Format(PyExc_KeyError, @@ -3655,7 +3664,7 @@ _is_list_of_strings(PyObject *obj) } seqlen = PyList_GET_SIZE(obj); for (i = 0; i < seqlen; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyUnicode_Check(item)) { return NPY_FALSE; } @@ -3699,7 +3708,7 @@ arraydescr_field_subset_view(_PyArray_LegacyDescr *self, PyObject *ind) */ PyTuple_SET_ITEM(names, i, name); - tup = PyDict_GetItemWithError(self->fields, name); + tup = PyDict_GetItemWithError(self->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { PyErr_SetObject(PyExc_KeyError, name); diff --git a/numpy/_core/src/multiarray/descriptor.h b/numpy/_core/src/multiarray/descriptor.h index 820e53f0c3e8..284afabe96fc 100644 --- a/numpy/_core/src/multiarray/descriptor.h +++ b/numpy/_core/src/multiarray/descriptor.h @@ -44,9 +44,6 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get( NPY_NO_EXPORT PyObject * array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args); -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_try_convert_from_dtype_attr(PyObject *obj); - NPY_NO_EXPORT int is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype); diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index ac37a04c30c6..29e5aecec5d5 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -285,10 +285,6 @@ fill_dl_tensor_information( } dl_tensor->ndim = ndim; - if (PyArray_IS_C_CONTIGUOUS(self)) { - /* No need to pass strides, so just NULL it again */ - dl_tensor->strides = NULL; - } dl_tensor->byte_offset = 0; return 0; @@ -351,9 +347,8 @@ create_dlpack_capsule( dl_tensor = &managed->dl_tensor; } - dl_tensor->shape = (int64_t *)((char *)ptr + offset); - /* Note that strides may be set to NULL later if C-contiguous */ - dl_tensor->strides = dl_tensor->shape + ndim; + dl_tensor->shape = (ndim > 0) ? (int64_t *)((char *)ptr + offset) : NULL; + dl_tensor->strides = (ndim > 0) ? 
dl_tensor->shape + ndim : NULL; if (fill_dl_tensor_information(dl_tensor, self, result_device) < 0) { PyMem_Free(ptr); @@ -397,7 +392,8 @@ device_converter(PyObject *obj, DLDevice *result_device) return NPY_SUCCEED; } - PyErr_SetString(PyExc_ValueError, "unsupported device requested"); + /* Must be a BufferError */ + PyErr_SetString(PyExc_BufferError, "unsupported device requested"); return NPY_FAIL; } diff --git a/numpy/_core/src/multiarray/dragon4_LICENSE.txt b/numpy/_core/src/multiarray/dragon4_LICENSE.txt new file mode 100644 index 000000000000..7bd49e7074a8 --- /dev/null +++ b/numpy/_core/src/multiarray/dragon4_LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2014 Ryan Juckett + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + +dragon4.c|h contains a modified version of Ryan Juckett's Dragon4 +implementation, obtained from https://www.ryanjuckett.com, +which has been ported from C++ to C and which has +modifications specific to printing floats in numpy. + +Ryan Juckett's original code was under the Zlib license; he gave numpy +permission to include it under the MIT license instead. 
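For context on the dlpack.c change above: the exported capsule now uses NULL shape/strides pointers for 0-d arrays, and otherwise always fills in strides rather than NULLing them for C-contiguous arrays. The DLPack specification allows a NULL strides pointer to mean a compact row-major layout, so a consumer can recover strides either way; a rough consumer-side sketch (the `dl_stride_elems` helper is hypothetical, not part of this patch):

#include <dlpack/dlpack.h>

/* Hypothetical consumer-side helper: element stride along `axis`,
 * treating strides == NULL as the compact row-major layout that the
 * DLPack spec permits exporters to signal this way. */
static int64_t
dl_stride_elems(const DLTensor *t, int axis)
{
    if (t->strides != NULL) {
        return t->strides[axis];
    }
    int64_t stride = 1;  /* compact layout: the last axis has unit stride */
    for (int i = t->ndim - 1; i > axis; i--) {
        stride *= t->shape[i];
    }
    return stride;
}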
diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index 188a55a4b5f5..dbad10842aff 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -2319,7 +2319,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), *out_flags = PyArrayMethod_MINIMAL_FLAGS; for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { PyMem_Free(data); @@ -2383,7 +2383,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), NPY_traverse_info_init(&data->decref_src); key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), 0); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { PyMem_Free(data); @@ -2435,14 +2435,14 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), /* set up the transfer function for each field */ for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return NPY_FAIL; } key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); @@ -2910,8 +2910,6 @@ _clear_cast_info_after_get_loop_failure(NPY_cast_info *cast_info) * TODO: Expand the view functionality for general offsets, not just 0: * Partial casts could be skipped also for `view_offset != 0`. * - * The `out_needs_api` flag must be initialized. - * * NOTE: In theory casting errors here could be slightly misleading in case * of a multi-step casting scenario. It should be possible to improve * this in the future. @@ -3428,11 +3426,13 @@ PyArray_CastRawArrays(npy_intp count, /* Cast */ char *args[2] = {src, dst}; npy_intp strides[2] = {src_stride, dst_stride}; - cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata); + int result = cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata); /* Cleanup */ NPY_cast_info_xfree(&cast_info); - + if (result < 0) { + return NPY_FAIL; + } if (flags & NPY_METH_REQUIRES_PYAPI && PyErr_Occurred()) { return NPY_FAIL; } diff --git a/numpy/_core/src/multiarray/dtype_transfer.h b/numpy/_core/src/multiarray/dtype_transfer.h index 04df5cb64c22..a354820e5d45 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.h +++ b/numpy/_core/src/multiarray/dtype_transfer.h @@ -25,6 +25,15 @@ typedef struct { } NPY_cast_info; +static inline void +NPY_context_init(PyArrayMethod_Context *context, PyArray_Descr *descr[2]) +{ + context->descriptors = descr; + context->caller = NULL; + context->_reserved = NULL; + context->flags = 0; +} + /* * Create a new cast-info struct with cast_info->context.descriptors linked. 
* Compilers should inline this to ensure the whole struct is not actually @@ -40,13 +49,9 @@ NPY_cast_info_init(NPY_cast_info *cast_info) * a scratch space to `NPY_cast_info` and link to that instead. */ cast_info->auxdata = NULL; - cast_info->context.descriptors = cast_info->descriptors; - - // TODO: Delete this again probably maybe create a new minimal init macro - cast_info->context.caller = NULL; + NPY_context_init(&(cast_info->context), cast_info->descriptors); } - /* * Free's all references and data held inside the struct (not the struct). * First checks whether `cast_info.func == NULL`, and assume it is @@ -100,6 +105,7 @@ NPY_cast_info_copy(NPY_cast_info *cast_info, NPY_cast_info *original) Py_XINCREF(cast_info->descriptors[1]); cast_info->context.caller = original->context.caller; Py_XINCREF(cast_info->context.caller); + cast_info->context.flags = original->context.flags; cast_info->context.method = original->context.method; Py_XINCREF(cast_info->context.method); if (original->auxdata == NULL) { diff --git a/numpy/_core/src/multiarray/dtype_traversal.c b/numpy/_core/src/multiarray/dtype_traversal.c index 91b1889b7d1f..e86aab7411d4 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.c +++ b/numpy/_core/src/multiarray/dtype_traversal.c @@ -346,7 +346,7 @@ get_fields_traverse_function( int offset; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(dtype->fields, key); + tup = PyDict_GetItem(dtype->fields, key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype, &offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return -1; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 0b1b0fb39192..bada1addd9cc 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -96,7 +96,7 @@ use_new_as_default(PyArray_DTypeMeta *self) return NULL; } /* - * Lets not trust that the DType is implemented correctly + * Let's not trust that the DType is implemented correctly * TODO: Should probably do an exact type-check (at least unless this is * an abstract DType). */ @@ -118,6 +118,31 @@ } + +/* + * By default fill in zero, one, and negative one via the Python casts; + * users should override this, but this allows us to use it for legacy user dtypes. + */ +static int +default_get_constant(PyArray_Descr *descr, int constant_id, void *data) +{ + return 0; +} + + +static int +legacy_fallback_setitem(PyArray_Descr *descr, PyObject *value, char *data) +{ + PyArrayObject_fields arr_fields = { + .flags = NPY_ARRAY_WRITEABLE, /* assume array is not behaved. 
*/ + .descr = descr, + }; + Py_SET_TYPE(&arr_fields, &PyArray_Type); + Py_SET_REFCNT(&arr_fields, 1); + + return PyDataType_GetArrFuncs(descr)->setitem(value, data, &arr_fields); +} + + static int legacy_setitem_using_DType(PyObject *obj, void *data, void *arr) { @@ -127,9 +152,7 @@ legacy_setitem_using_DType(PyObject *obj, void *data, void *arr) "supported for basic NumPy DTypes."); return -1; } - PyArrayDTypeMeta_SetItem *setitem; - setitem = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(arr)))->setitem; - return setitem(PyArray_DESCR(arr), obj, data); + return NPY_DT_CALL_setitem(PyArray_DESCR(arr), obj, data); } @@ -195,6 +218,7 @@ dtypemeta_initialize_struct_from_spec( NPY_DT_SLOTS(DType)->get_clear_loop = NULL; NPY_DT_SLOTS(DType)->get_fill_zero_loop = NULL; NPY_DT_SLOTS(DType)->finalize_descr = NULL; + NPY_DT_SLOTS(DType)->get_constant = default_get_constant; NPY_DT_SLOTS(DType)->f = default_funcs; PyType_Slot *spec_slot = spec->slots; @@ -693,7 +717,7 @@ void_ensure_canonical(_PyArray_LegacyDescr *self) int maxalign = 1; for (Py_ssize_t i = 0; i < field_num; i++) { PyObject *name = PyTuple_GET_ITEM(self->names, i); - PyObject *tuple = PyDict_GetItem(self->fields, name); + PyObject *tuple = PyDict_GetItem(self->fields, name); // noqa: borrowed-ref OK PyObject *new_tuple = PyTuple_New(PyTuple_GET_SIZE(tuple)); PyArray_Descr *field_descr = NPY_DT_CALL_ensure_canonical( (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0)); @@ -1068,9 +1092,9 @@ object_common_dtype( * Some may have more aliases, as `intp` is not its own thing, * as of writing this, these are not added here. * - * @returns 0 on success, -1 on failure. + * @returns A borrowed reference to the new DType or NULL. */ -NPY_NO_EXPORT int +NPY_NO_EXPORT PyArray_DTypeMeta * dtypemeta_wrap_legacy_descriptor( _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, PyTypeObject *dtype_super_class, const char *name, const char *alias) @@ -1097,19 +1121,20 @@ dtypemeta_wrap_legacy_descriptor( "that of an existing dtype (with the assumption it is just " "copied over and can be replaced).", descr->typeobj, Py_TYPE(descr)); - return -1; + return NULL; } NPY_DType_Slots *dt_slots = PyMem_Malloc(sizeof(NPY_DType_Slots)); if (dt_slots == NULL) { - return -1; + return NULL; } memset(dt_slots, '\0', sizeof(NPY_DType_Slots)); + dt_slots->get_constant = default_get_constant; PyArray_DTypeMeta *dtype_class = PyMem_Malloc(sizeof(PyArray_DTypeMeta)); if (dtype_class == NULL) { PyMem_Free(dt_slots); - return -1; + return NULL; } /* @@ -1129,12 +1154,7 @@ dtypemeta_wrap_legacy_descriptor( .tp_flags = Py_TPFLAGS_DEFAULT, .tp_base = NULL, /* set below */ .tp_new = (newfunc)legacy_dtype_default_new, - .tp_doc = ( - "DType class corresponding to the scalar type and dtype of " - "the same name.\n\n" - "Please see `numpy.dtype` for the typical way to create\n" - "dtype instances and :ref:`arrays.dtypes` for additional\n" - "information."), + .tp_doc = NULL, /* set in python */ },}, .flags = NPY_DT_LEGACY, /* Further fields are not common between DTypes */ @@ -1148,12 +1168,12 @@ dtypemeta_wrap_legacy_descriptor( /* Let python finish the initialization */ if (PyType_Ready((PyTypeObject *)dtype_class) < 0) { Py_DECREF(dtype_class); - return -1; + return NULL; } dt_slots->castingimpls = PyDict_New(); if (dt_slots->castingimpls == NULL) { Py_DECREF(dtype_class); - return -1; + return NULL; } /* @@ -1169,13 +1189,20 @@ dtypemeta_wrap_legacy_descriptor( /* Set default functions (correct for most dtypes, override below) */ dt_slots->default_descr = 
nonparametric_default_descr; dt_slots->discover_descr_from_pyobject = ( - nonparametric_discover_descr_from_pyobject); + nonparametric_discover_descr_from_pyobject); dt_slots->is_known_scalar_type = python_builtins_are_known_scalar_types; dt_slots->common_dtype = default_builtin_common_dtype; dt_slots->common_instance = NULL; dt_slots->ensure_canonical = ensure_native_byteorder; dt_slots->get_fill_zero_loop = NULL; dt_slots->finalize_descr = NULL; + // May be overwritten, but if not, provide a fallback via the array-struct hack. + // `getitem` is trickier because of structured dtypes returning views. + if (dt_slots->f.setitem == NULL) { + dt_slots->f.setitem = legacy_setitem_using_DType; + } + dt_slots->setitem = legacy_fallback_setitem; + dt_slots->getitem = NULL; if (PyTypeNum_ISSIGNED(dtype_class->type_num)) { /* Convert our scalars (raise on too large unsigned and NaN, etc.) */ @@ -1233,7 +1260,7 @@ dtypemeta_wrap_legacy_descriptor( if (_PyArray_MapPyTypeToDType(dtype_class, descr->typeobj, PyTypeNum_ISUSERDEF(dtype_class->type_num)) < 0) { Py_DECREF(dtype_class); - return -1; + return NULL; } /* Finally, replace the current class of the descr */ @@ -1243,23 +1270,23 @@ dtypemeta_wrap_legacy_descriptor( if (!PyTypeNum_ISUSERDEF(descr->type_num)) { if (npy_cache_import_runtime("numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { - return -1; + return NULL; } if (PyObject_CallFunction( npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { - return -1; + return NULL; } } else { // ensure the within dtype cast is populated for legacy user dtypes if (PyArray_GetCastingImpl(dtype_class, dtype_class) == NULL) { - return -1; + return NULL; } } - return 0; + return dtype_class; } @@ -1398,7 +1425,7 @@ PyArray_DTypeMeta *_Void_dtype = NULL; /*NUMPY_API - * Fetch the ArrFuncs struct which new lives on the DType and not the + * Fetch the ArrFuncs struct which now lives on the DType and not the * descriptor. Use of this struct should be avoided but remains necessary * for certain functionality. * diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index a8b78e3f7518..bf0acb48b899 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -67,6 +67,11 @@ typedef struct { * parameters, if any, as the operand dtype. */ PyArrayDTypeMeta_FinalizeDescriptor *finalize_descr; + /* + * Function to fetch constants. Always defined, but may return "undefined" + * for all values. + */ + PyArrayDTypeMeta_GetConstant *get_constant; /* * The casting implementation (ArrayMethod) to convert between two * instances of this DType, stored explicitly for fast access: @@ -85,11 +90,17 @@ typedef struct { * dtype instance for backward compatibility. (Keep this at end) */ PyArray_ArrFuncs f; + + /* + * Hidden slots for the sort and argsort arraymethods. 
+ */ + PyArrayMethodObject *sort_meth; + PyArrayMethodObject *argsort_meth; } NPY_DType_Slots; // This must be updated if new slots before within_dtype_castingimpl // are added -#define NPY_NUM_DTYPE_SLOTS 11 +#define NPY_NUM_DTYPE_SLOTS 12 #define NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS 22 #define NPY_DT_MAX_ARRFUNCS_SLOT \ NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS + _NPY_DT_ARRFUNCS_OFFSET @@ -124,6 +135,8 @@ typedef struct { NPY_DT_SLOTS(NPY_DTYPE(descr))->getitem(descr, data_ptr) #define NPY_DT_CALL_setitem(descr, value, data_ptr) \ NPY_DT_SLOTS(NPY_DTYPE(descr))->setitem(descr, value, data_ptr) +#define NPY_DT_CALL_get_constant(descr, constant_id, data_ptr) \ + NPY_DT_SLOTS(NPY_DTYPE(descr))->get_constant(descr, constant_id, data_ptr) /* @@ -153,7 +166,7 @@ NPY_NO_EXPORT int python_builtins_are_known_scalar_types( PyArray_DTypeMeta *cls, PyTypeObject *pytype); -NPY_NO_EXPORT int +NPY_NO_EXPORT PyArray_DTypeMeta * dtypemeta_wrap_legacy_descriptor( _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, PyTypeObject *dtype_super_class, const char *name, const char *alias); @@ -281,8 +294,7 @@ PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) static inline int PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) { - return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem( - v, itemptr, arr); + return NPY_DT_CALL_setitem(PyArray_DESCR(arr), v, itemptr); } // Like PyArray_DESCR_REPLACE, but calls ensure_canonical instead of DescrNew diff --git a/numpy/_core/src/multiarray/einsum.c.src b/numpy/_core/src/multiarray/einsum.cpp similarity index 99% rename from numpy/_core/src/multiarray/einsum.c.src rename to numpy/_core/src/multiarray/einsum.cpp index 3733c436cb1b..f12cec7824d7 100644 --- a/numpy/_core/src/multiarray/einsum.c.src +++ b/numpy/_core/src/multiarray/einsum.cpp @@ -20,14 +20,14 @@ #include //PyArray_AssignRawScalar #include - +extern "C" { #include "convert.h" #include "common.h" #include "ctors.h" #include "einsum_sumprod.h" #include "einsum_debug.h" - +} /* * Parses the subscripts for one operand into an output of 'ndim' @@ -40,7 +40,6 @@ * - subscripts="abbcbc", ndim=6 -> op_labels=[97, 98, -1, 99, -3, -2] * - subscripts="ab...bc", ndim=6 -> op_labels=[97, 98, 0, 0, -3, 99] */ - static int parse_operand_subscripts(char *subscripts, int length, int ndim, int iop, char *op_labels, @@ -131,13 +130,13 @@ parse_operand_subscripts(char *subscripts, int length, /* If it is a proper label, find any duplicates of it. */ if (label > 0) { /* Search for the next matching label. */ - char *next = memchr(op_labels + idim + 1, label, ndim - idim - 1); + char *next = (char*)memchr(op_labels + idim + 1, label, ndim - idim - 1); while (next != NULL) { /* The offset from next to op_labels[idim] (negative). */ *next = (char)((op_labels + idim) - next); /* Search for the next matching label. */ - next = memchr(next + 1, label, op_labels + ndim - 1 - next); + next = (char*)memchr(next + 1, label, op_labels + ndim - 1 - next); } } } @@ -322,7 +321,7 @@ get_single_op_view(PyArrayObject *op, char *labels, Py_TYPE(op), PyArray_DESCR(op), ndim_output, new_dims, new_strides, PyArray_DATA(op), PyArray_ISWRITEABLE(op) ? 
NPY_ARRAY_WRITEABLE : 0, - (PyObject *)op, (PyObject *)op, 0); + (PyObject *)op, (PyObject *)op, (_NPY_CREATION_FLAGS)0); if (*ret == NULL) { return -1; @@ -472,7 +471,7 @@ prepare_op_axes(int ndim, int iop, char *labels, int *axes, } /* It's a labeled dimension, find the matching one */ else { - char *match = memchr(labels, label, ndim); + char *match = (char*)memchr(labels, label, ndim); /* If the op doesn't have the label, broadcast it */ if (match == NULL) { axes[i] = -1; @@ -1112,6 +1111,7 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, * the strides that are fixed for the whole loop. */ stride = NpyIter_GetInnerStrideArray(iter); + sop = get_sum_of_products_function(nop, NpyIter_GetDescrArray(iter)[0]->type_num, NpyIter_GetDescrArray(iter)[0]->elsize, diff --git a/numpy/_core/src/multiarray/flagsobject.c b/numpy/_core/src/multiarray/flagsobject.c index 8257727030c0..2570d3ec5d16 100644 --- a/numpy/_core/src/multiarray/flagsobject.c +++ b/numpy/_core/src/multiarray/flagsobject.c @@ -185,7 +185,7 @@ static char *msg = "future versions will not create a writeable " PyArrayFlagsObject *self, void *NPY_UNUSED(ignored)) \ { \ if (self->flags & NPY_ARRAY_WARN_ON_WRITE) { \ - if (PyErr_Warn(PyExc_FutureWarning, msg) < 0) {\ + if (PyErr_WarnEx(PyExc_FutureWarning, msg, 1) < 0) {\ return NULL; \ } \ }\ diff --git a/numpy/_core/src/multiarray/fnv.c b/numpy/_core/src/multiarray/fnv.c new file mode 100644 index 000000000000..2b7848519e61 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.c @@ -0,0 +1,85 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include +#include "numpy/npy_common.h" +#include "fnv.h" + + +#define FNV1A_32_INIT ((npy_uint32)0x811c9dc5) +#define FNV1A_64_INIT ((npy_uint64)0xcbf29ce484222325ULL) + +/* + Compute a 32-bit FNV-1a hash of buffer + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_32a.c +*/ +npy_uint32 +npy_fnv1a_32(const void *buf, size_t len, npy_uint32 hval) +{ + const unsigned char *bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint32)*bp++; + + /* multiply by the 32 bit FNV magic prime */ + /* hval *= 0x01000193; */ + hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24); + } + + return hval; +} + +/* + Compute a 64-bit FNV-1a hash of the given data + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_64a.c +*/ +npy_uint64 +npy_fnv1a_64(const void *buf, size_t len, npy_uint64 hval) +{ + const unsigned char *bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint64)*bp++; + + /* multiply by the 64 bit FNV magic prime */ + /* hval *= 0x100000001b3ULL; */ + hval += (hval << 1) + (hval << 4) + (hval << 5) + + (hval << 7) + (hval << 8) + (hval << 40); + } + + return hval; +} + +/* + * Compute a size_t FNV-1a hash of the given data + * This will use 32-bit or 64-bit hash depending on the size of size_t + */ +size_t +npy_fnv1a(const void *buf, size_t len) +{ +#if NPY_SIZEOF_SIZE_T 
== 8 + return (size_t)npy_fnv1a_64(buf, len, FNV1A_64_INIT); +#else /* NPY_SIZEOF_SIZE_T == 4 */ + return (size_t)npy_fnv1a_32(buf, len, FNV1A_32_INIT); +#endif +} diff --git a/numpy/_core/src/multiarray/fnv.h b/numpy/_core/src/multiarray/fnv.h new file mode 100644 index 000000000000..c76f54a645b9 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.h @@ -0,0 +1,26 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ + + +/* + Compute a size_t FNV-1a hash of the given data + This will use 32-bit or 64-bit hash depending on the size of size_t + + Parameters: + ----------- + buf - pointer to the data to be hashed + len - length of the data in bytes + + Returns: + ----------- + size_t hash value +*/ +size_t npy_fnv1a(const void *buf, size_t len); + +#endif // NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 8482b6006e3e..1aff38476d50 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -85,7 +85,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) /* Free old dimensions and strides */ npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = nd; - ((PyArrayObject_fields *)self)->dimensions = _dimensions; + ((PyArrayObject_fields *)self)->dimensions = _dimensions; ((PyArrayObject_fields *)self)->strides = _dimensions + nd; if (nd) { @@ -95,7 +95,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) } else { /* Free old dimensions and strides */ - npy_free_cache_dim_array(self); + npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = 0; ((PyArrayObject_fields *)self)->dimensions = NULL; ((PyArrayObject_fields *)self)->strides = NULL; @@ -116,6 +116,19 @@ array_strides_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) static int array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) { + if (obj == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete array strides"); + return -1; + } + + /* Deprecated NumPy 2.4, 2025-05-11 */ + if (DEPRECATE("Setting the strides on a NumPy array has been deprecated in NumPy 2.4.\n" + "As an alternative, you can create a new view using np.lib.stride_tricks.as_strided." 
+ ) < 0 ) { + return -1; + } + PyArray_Dims newstrides = {NULL, -1}; PyArrayObject *new; npy_intp numbytes = 0; @@ -124,11 +137,6 @@ array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) npy_intp upper_offset = 0; Py_buffer view; - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array strides"); - return -1; - } if (!PyArray_OptionalIntpConverter(obj, &newstrides) || newstrides.len == -1) { PyErr_SetString(PyExc_TypeError, "invalid strides"); @@ -492,15 +500,23 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) if (temp == NULL) { return -1; } + /* create new dimensions cache and fill it */ + npy_intp new_nd = PyArray_NDIM(temp); + npy_intp *new_dims = npy_alloc_cache_dim(2 * new_nd); + if (new_dims == NULL) { + Py_DECREF(temp); + PyErr_NoMemory(); + return -1; + } + memcpy(new_dims, PyArray_DIMS(temp), new_nd * sizeof(npy_intp)); + memcpy(new_dims + new_nd, PyArray_STRIDES(temp), new_nd * sizeof(npy_intp)); + /* Update self with new cache */ npy_free_cache_dim_array(self); - ((PyArrayObject_fields *)self)->dimensions = PyArray_DIMS(temp); - ((PyArrayObject_fields *)self)->nd = PyArray_NDIM(temp); - ((PyArrayObject_fields *)self)->strides = PyArray_STRIDES(temp); + ((PyArrayObject_fields *)self)->nd = new_nd; + ((PyArrayObject_fields *)self)->dimensions = new_dims; + ((PyArrayObject_fields *)self)->strides = new_dims + new_nd; newtype = PyArray_DESCR(temp); - Py_INCREF(PyArray_DESCR(temp)); - /* Fool deallocator not to delete these*/ - ((PyArrayObject_fields *)temp)->nd = 0; - ((PyArrayObject_fields *)temp)->dimensions = NULL; + Py_INCREF(newtype); Py_DECREF(temp); } @@ -857,34 +873,6 @@ array_matrix_transpose_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) return PyArray_MatrixTranspose(self); } -static PyObject * -array_ptp(PyArrayObject *self, void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`ptp` was removed from the ndarray class in NumPy 2.0. " - "Use np.ptp(arr, ...) instead."); - return NULL; -} - -static PyObject * -array_newbyteorder(PyArrayObject *self, PyObject *args) -{ - PyErr_SetString(PyExc_AttributeError, - "`newbyteorder` was removed from the ndarray class " - "in NumPy 2.0. " - "Use `arr.view(arr.dtype.newbyteorder(order))` instead."); - return NULL; -} - -static PyObject * -array_itemset(PyArrayObject *self, PyObject *args) -{ - PyErr_SetString(PyExc_AttributeError, - "`itemset` was removed from the ndarray class in " - "NumPy 2.0. 
Use `arr[index] = value` instead."); - return NULL; -} - NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { {"ndim", (getter)array_ndim_get, @@ -950,18 +938,6 @@ NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { (getter)array_matrix_transpose_get, NULL, NULL, NULL}, - {"ptp", - (getter)array_ptp, - NULL, - NULL, NULL}, - {"newbyteorder", - (getter)array_newbyteorder, - NULL, - NULL, NULL}, - {"itemset", - (getter)array_itemset, - NULL, - NULL, NULL}, {"device", (getter)array_device, NULL, diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index f570caf1588f..853e247e0b74 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -6,6 +6,7 @@ #include +#include "npy_atomic.h" #include "npy_config.h" @@ -78,7 +79,7 @@ static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l) * For builtin type, hash relies on : kind + byteorder + flags + * type_num + elsize + alignment */ - t = Py_BuildValue("(cccii)", descr->kind, nbyteorder, + t = Py_BuildValue("(ccKnn)", descr->kind, nbyteorder, descr->flags, descr->elsize, descr->alignment); for(i = 0; i < PyTuple_Size(t); ++i) { @@ -127,7 +128,7 @@ static int _array_descr_walk_fields(PyObject *names, PyObject* fields, PyObject* * For each field, add the key + descr + offset to l */ key = PyTuple_GET_ITEM(names, pos); - value = PyDict_GetItem(fields, key); + value = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK /* XXX: are those checks necessary ? */ if (value == NULL) { PyErr_SetString(PyExc_SystemError, @@ -256,12 +257,13 @@ static int _array_descr_walk(PyArray_Descr* descr, PyObject *l) } /* - * Return 0 if successful + * Return hash on success, -1 on failure */ -static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash) +static npy_hash_t _PyArray_DescrHashImp(PyArray_Descr *descr) { PyObject *l, *tl; int st; + npy_hash_t hash; l = PyList_New(0); if (l == NULL) { @@ -283,25 +285,16 @@ static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash) if (tl == NULL) return -1; - *hash = PyObject_Hash(tl); + hash = PyObject_Hash(tl); Py_DECREF(tl); - if (*hash == -1) { - /* XXX: does PyObject_Hash set an exception on failure ? 
*/ -#if 0 - PyErr_SetString(PyExc_SystemError, - "(Hash) Error while hashing final tuple"); -#endif - return -1; - } - - return 0; + return hash; } NPY_NO_EXPORT npy_hash_t PyArray_DescrHash(PyObject* odescr) { PyArray_Descr *descr; - int st; + npy_hash_t hash; if (!PyArray_DescrCheck(odescr)) { PyErr_SetString(PyExc_ValueError, @@ -310,12 +303,15 @@ PyArray_DescrHash(PyObject* odescr) } descr = (PyArray_Descr*)odescr; - if (descr->hash == -1) { - st = _PyArray_DescrHashImp(descr, &descr->hash); - if (st) { + hash = npy_atomic_load_hash_t(&descr->hash); + + if (hash == -1) { + hash = _PyArray_DescrHashImp(descr); + if (hash == -1) { return -1; } + npy_atomic_store_hash_t(&descr->hash, hash); } - return descr->hash; + return hash; } diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index d2db10633810..ff100c3d9d5d 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -4,6 +4,7 @@ #define PY_SSIZE_T_CLEAN #include #include +#include #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -24,6 +25,7 @@ #include "lowlevel_strided_loops.h" #include "array_assign.h" #include "refcount.h" +#include "methods.h" #include "npy_sort.h" #include "npy_partition.h" @@ -624,25 +626,26 @@ npy_fastputmask( npy_intp ni, npy_intp nv, npy_intp chunk) { if (chunk == 1) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 2) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 2) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 4) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 4) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 8) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 8) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 16) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 16) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 32) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 32) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + else { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } @@ -1192,6 +1195,8 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, */ static int _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, + PyArrayMethod_StridedLoop *strided_loop, PyArrayMethod_Context *context, + NpyAuxData *auxdata, NPY_ARRAYMETHOD_FLAGS *method_flags, PyArray_PartitionFunc *part, npy_intp const *kth, npy_intp nkth) { npy_intp N = PyArray_DIM(op, axis); @@ -1199,8 +1204,8 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, npy_intp astride = PyArray_STRIDE(op, axis); int swap = PyArray_ISBYTESWAPPED(op); int is_aligned = IsAligned(op); - int needcopy = !is_aligned || swap || astride != elsize; - int needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + int needcopy = 0; + int needs_api; char *buffer = NULL; @@ -1222,6 +1227,13 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, return 0; } + if (strided_loop != NULL) { + needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; + } + else 
{ + needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + } + PyObject *mem_handler = PyDataMem_GetHandler(); if (mem_handler == NULL) { return -1; @@ -1233,6 +1245,26 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } size = it->size; + if (strided_loop != NULL) { + // Descriptors have already been resolved + odescr = context->descriptors[0]; + Py_INCREF(odescr); + } + else { + if (swap) { + odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); + } + else { + odescr = descr; + Py_INCREF(odescr); + } + } + + needcopy = !is_aligned || astride != elsize; + if (!PyArray_EquivTypes(descr, odescr)) { + needcopy = 1; + } + if (needcopy) { buffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (buffer == NULL) { @@ -1243,14 +1275,6 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, memset(buffer, 0, N * elsize); } - if (swap) { - odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); - } - else { - odescr = descr; - Py_INCREF(odescr); - } - NPY_ARRAYMETHOD_FLAGS to_transfer_flags; if (PyArray_GetDTypeTransferFunction( @@ -1268,7 +1292,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } } - NPY_BEGIN_THREADS_DESCR(descr); + if (!needs_api) { + NPY_BEGIN_THREADS; + } while (size--) { char *bufptr = it->dataptr; @@ -1293,7 +1319,14 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, */ if (part == NULL) { - ret = sort(bufptr, N, op); + if (strided_loop != NULL) { + char *const data[2] = {bufptr, bufptr}; + npy_intp strides[2] = {elsize, elsize}; + ret = strided_loop(context, data, &N, strides, NULL); + } + else { + ret = sort(bufptr, N, op); + } if (needs_api && PyErr_Occurred()) { ret = -1; } @@ -1331,7 +1364,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } fail: - NPY_END_THREADS_DESCR(descr); + if (!needs_api) { + NPY_END_THREADS; + } /* cleanup internal buffer */ if (needcopy) { PyArray_ClearBuffer(odescr, buffer, elsize, N, 1); @@ -1359,16 +1394,17 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, static PyObject* _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, - PyArray_ArgPartitionFunc *argpart, - npy_intp const *kth, npy_intp nkth) + PyArrayMethod_StridedLoop *strided_loop, PyArrayMethod_Context *context, + NpyAuxData *auxdata, NPY_ARRAYMETHOD_FLAGS *method_flags, + PyArray_ArgPartitionFunc *argpart, npy_intp const *kth, npy_intp nkth) { npy_intp N = PyArray_DIM(op, axis); npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op); npy_intp astride = PyArray_STRIDE(op, axis); int swap = PyArray_ISBYTESWAPPED(op); int is_aligned = IsAligned(op); - int needcopy = !is_aligned || swap || astride != elsize; - int needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + int needcopy = 0; + int needs_api; int needidxbuffer; char *valbuffer = NULL; @@ -1405,6 +1441,13 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, rstride = PyArray_STRIDE(rop, axis); needidxbuffer = rstride != sizeof(npy_intp); + if (strided_loop != NULL) { + needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; + } + else { + needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + } + /* Check if there is any argsorting to do */ if (N <= 1 || PyArray_SIZE(op) == 0) { Py_DECREF(mem_handler); @@ -1420,6 +1463,26 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } size = it->size; + if (strided_loop != NULL) { + // Descriptors have already been resolved + odescr = context->descriptors[0]; + 
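/*
 * The copy decision above is now shared by _new_sortlike and
 * _new_argsortlike: a lane is buffered whenever it is unaligned, strided,
 * or its descriptor differs from the one the sort loop was resolved for
 * (e.g. a byte-swapped input). A hypothetical helper restating the rule,
 * assuming the surrounding multiarray headers:
 */
static int
sort_lane_needs_copy(int is_aligned, npy_intp astride, npy_intp elsize,
                     PyArray_Descr *descr, PyArray_Descr *odescr)
{
    int needcopy = !is_aligned || astride != elsize;
    if (!PyArray_EquivTypes(descr, odescr)) {
        needcopy = 1;  /* sort loop expects odescr's layout, not descr's */
    }
    return needcopy;
}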
Py_INCREF(odescr); + } + else { + if (swap) { + odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); + } + else { + odescr = descr; + Py_INCREF(odescr); + } + } + + needcopy = !is_aligned || astride != elsize; + if (!PyArray_EquivTypes(descr, odescr)) { + needcopy = 1; + } + if (needcopy) { valbuffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (valbuffer == NULL) { @@ -1430,14 +1493,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, memset(valbuffer, 0, N * elsize); } - if (swap) { - odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); - } - else { - odescr = descr; - Py_INCREF(odescr); - } - if (PyArray_GetDTypeTransferFunction( is_aligned, astride, elsize, descr, odescr, 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { @@ -1454,7 +1509,9 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } } - NPY_BEGIN_THREADS_DESCR(descr); + if (!needs_api) { + NPY_BEGIN_THREADS; + } while (size--) { char *valptr = it->dataptr; @@ -1483,7 +1540,14 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } if (argpart == NULL) { - ret = argsort(valptr, idxptr, N, op); + if (strided_loop != NULL) { + char *const data[2] = {valptr, (char *)idxptr}; + npy_intp strides[2] = {elsize, sizeof(npy_intp)}; + ret = strided_loop(context, data, &N, strides, NULL); + } + else { + ret = argsort(valptr, idxptr, N, op); + } /* Object comparisons may raise an exception */ if (needs_api && PyErr_Occurred()) { ret = -1; @@ -1523,7 +1587,9 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } fail: - NPY_END_THREADS_DESCR(descr); + if (!needs_api) { + NPY_END_THREADS; + } /* cleanup internal buffers */ if (needcopy) { PyArray_ClearBuffer(odescr, valbuffer, elsize, N, 1); @@ -1548,56 +1614,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } -/*NUMPY_API - * Sort an array in-place - */ -NPY_NO_EXPORT int -PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArray_SortFunc *sort = NULL; - int n = PyArray_NDIM(op); - - if (check_and_adjust_axis(&axis, n) < 0) { - return -1; - } - - if (PyArray_FailUnlessWriteable(op, "sort array") < 0) { - return -1; - } - - if (which < 0 || which >= NPY_NSORTS) { - PyErr_SetString(PyExc_ValueError, "not a valid sort kind"); - return -1; - } - - sort = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort[which]; - - if (sort == NULL) { - if (PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - switch (which) { - default: - case NPY_QUICKSORT: - sort = npy_quicksort; - break; - case NPY_HEAPSORT: - sort = npy_heapsort; - break; - case NPY_STABLESORT: - sort = npy_timsort; - break; - } - } - else { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); - return -1; - } - } - - return _new_sortlike(op, axis, sort, NULL, NULL, 0); -} - - /* * make kth array positive, ravel and sort it */ @@ -1702,7 +1718,7 @@ PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, return -1; } - ret = _new_sortlike(op, axis, sort, part, + ret = _new_sortlike(op, axis, sort, NULL, NULL, NULL, NULL, part, PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl)); Py_DECREF(kthrvl); @@ -1711,52 +1727,6 @@ PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, } -/*NUMPY_API - * ArgSort an array - */ -NPY_NO_EXPORT PyObject * -PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArrayObject *op2; - PyArray_ArgSortFunc *argsort = NULL; - PyObject *ret; - - argsort = 
PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort[which]; - - if (argsort == NULL) { - if (PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - switch (which) { - default: - case NPY_QUICKSORT: - argsort = npy_aquicksort; - break; - case NPY_HEAPSORT: - argsort = npy_aheapsort; - break; - case NPY_STABLESORT: - argsort = npy_atimsort; - break; - } - } - else { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); - return NULL; - } - } - - op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0); - if (op2 == NULL) { - return NULL; - } - - ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, 0); - - Py_DECREF(op2); - return ret; -} - - /*NUMPY_API * ArgPartition an array */ @@ -1804,7 +1774,7 @@ PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int axis, return NULL; } - ret = _new_argsortlike(op2, axis, argsort, argpart, + ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, NULL, NULL, argpart, PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl)); Py_DECREF(kthrvl); @@ -2525,11 +2495,13 @@ count_nonzero_u8(const char *data, npy_intp bstride, npy_uintp len) len -= len_m; count = len_m - zcount; #else - if (!NPY_ALIGNMENT_REQUIRED || npy_is_aligned(data, sizeof(npy_uint64))) { + if (npy_is_aligned(data, sizeof(npy_uint64))) { int step = 6 * sizeof(npy_uint64); int left_bytes = len % step; for (const char *end = data + len; data < end - left_bytes; data += step) { - count += count_nonzero_bytes_384((const npy_uint64 *)data); + npy_uint64 arr[6]; + memcpy(arr, data, step); + count += count_nonzero_bytes_384(arr); } len = left_bytes; } @@ -3151,3 +3123,241 @@ PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, return PyArray_Pack(PyArray_DESCR(self), data, obj); } + + +/* Table of generic sort functions for use in PyArray_SortEx*/ +static PyArray_SortFunc* const generic_sort_table[] = {npy_quicksort, + npy_heapsort, + npy_timsort}; + +/*NUMPY_API + * Sort an array in-place with extended parameters + */ +NPY_NO_EXPORT int +PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) +{ + PyArrayMethodObject *sort_method = NULL; + PyArrayMethod_StridedLoop *strided_loop = NULL; + PyArrayMethod_SortParameters sort_params = {.flags = flags}; + PyArrayMethod_Context context = {0}; + PyArray_Descr *loop_descrs[2]; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS method_flags = 0; + + PyArray_SortFunc **sort_table = NULL; + PyArray_SortFunc *sort = NULL; + + int ret; + + if (check_and_adjust_axis(&axis, PyArray_NDIM(op)) < 0) { + return -1; + } + + if (PyArray_FailUnlessWriteable(op, "sort array") < 0) { + return -1; + } + + // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to NPY_QUICKSORT + flags &= ~_NPY_SORT_HEAPSORT; + + // Look for type specific functions + sort_method = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(op)))->sort_meth; + if (sort_method != NULL) { + PyArray_Descr *descr = PyArray_DESCR(op); + PyArray_DTypeMeta *dt = NPY_DTYPE(descr); + + PyArray_DTypeMeta *dtypes[2] = {dt, dt}; + PyArray_Descr *given_descrs[2] = {descr, descr}; + // Sort cannot be a view, so view_offset is unused + npy_intp view_offset = 0; + + if (sort_method->resolve_descriptors( + sort_method, dtypes, given_descrs, loop_descrs, &view_offset) < 0) { + PyErr_SetString(PyExc_RuntimeError, + "unable to resolve descriptors for sort"); + return -1; + } + context.descriptors = loop_descrs; + context.parameters = &sort_params; + context.method = sort_method; + + // Arrays are always contiguous for sorting + npy_intp strides[2] = {loop_descrs[0]->elsize, 
loop_descrs[1]->elsize}; + + if (sort_method->get_strided_loop( + &context, 1, 0, strides, &strided_loop, &auxdata, &method_flags) < 0) { + ret = -1; + goto fail; + } + } + else { + sort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort; + switch (flags) { + case NPY_SORT_DEFAULT: + sort = sort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + sort = sort_table[NPY_STABLESORT]; + break; + default: + break; + } + + // Look for appropriate generic function if no type specific version + if (sort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return -1; + } + switch (flags) { + case NPY_SORT_DEFAULT: + sort = generic_sort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + sort = generic_sort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (sort == NULL) { + PyErr_SetString(PyExc_TypeError, + "no current sort function meets the requirements"); + return -1; + } + } + + ret = _new_sortlike(op, axis, sort, strided_loop, + &context, auxdata, &method_flags, NULL, NULL, 0); + +fail: + if (sort_method != NULL) { + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(context.descriptors[0]); + Py_DECREF(context.descriptors[1]); + } + return ret; +} + +/* Table of generic argsort function for use by PyArray_ArgSortEx */ +static PyArray_ArgSortFunc* const generic_argsort_table[] = {npy_aquicksort, + npy_aheapsort, + npy_atimsort}; + +/*NUMPY_API + * ArgSort an array with extended parameters + */ +NPY_NO_EXPORT PyObject * +PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) +{ + PyArrayObject *op2; + PyObject *ret; + PyArrayMethodObject *argsort_method = NULL; + PyArrayMethod_StridedLoop *strided_loop = NULL; + PyArrayMethod_SortParameters sort_params = {.flags = flags}; + PyArrayMethod_Context context = {0}; + PyArray_Descr *loop_descrs[2]; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS method_flags = 0; + + PyArray_ArgSortFunc **argsort_table = NULL; + PyArray_ArgSortFunc *argsort = NULL; + + // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to NPY_QUICKSORT + flags &= ~_NPY_SORT_HEAPSORT; + + // Look for type specific functions + argsort_method = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(op)))->argsort_meth; + if (argsort_method != NULL) { + PyArray_Descr *descr = PyArray_DESCR(op); + PyArray_Descr *odescr = PyArray_DescrFromType(NPY_INTP); + PyArray_DTypeMeta *dt = NPY_DTYPE(descr); + PyArray_DTypeMeta *odt = NPY_DTYPE(odescr); + + PyArray_DTypeMeta *dtypes[2] = {dt, odt}; + PyArray_Descr *given_descrs[2] = {descr, odescr}; + // we can ignore the view_offset for sorting + npy_intp view_offset = 0; + + int resolve_ret = argsort_method->resolve_descriptors( + argsort_method, dtypes, given_descrs, loop_descrs, &view_offset); + Py_DECREF(odescr); + if (resolve_ret < 0) { + PyErr_SetString(PyExc_RuntimeError, + "unable to resolve descriptors for argsort"); + return NULL; + } + context.descriptors = loop_descrs; + context.parameters = &sort_params; + context.method = argsort_method; + + // Arrays are always contiguous for sorting + npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; + + if (argsort_method->get_strided_loop( + &context, 1, 0, strides, &strided_loop, &auxdata, &method_flags) < 0) { + ret = NULL; + goto fail; + } + } + else { + argsort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort; + switch (flags) { + case NPY_SORT_DEFAULT: + argsort = argsort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + argsort = 
argsort_table[NPY_STABLESORT]; + break; + default: + break; + } + + // Look for generic function if no type specific version + if (argsort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return NULL; + } + switch (flags) { + case NPY_SORT_DEFAULT: + argsort = generic_argsort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + argsort = generic_argsort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (argsort == NULL) { + PyErr_SetString(PyExc_TypeError, + "no current argsort function meets the requirements"); + return NULL; + } + } + + op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0); + if (op2 == NULL) { + ret = NULL; + goto fail; + } + + ret = _new_argsortlike(op2, axis, argsort, strided_loop, + &context, auxdata, &method_flags, NULL, NULL, 0); + Py_DECREF(op2); + +fail: + if (argsort_method != NULL) { + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(context.descriptors[0]); + Py_DECREF(context.descriptors[1]); + } + return ret; +} + + diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 422c690882ab..1b4ed59fbfe0 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -23,83 +23,12 @@ #include "item_selection.h" #include "lowlevel_strided_loops.h" #include "array_assign.h" +#include "npy_pycompat.h" #define NEWAXIS_INDEX -1 #define ELLIPSIS_INDEX -2 #define SINGLE_INDEX -3 -/* - * Tries to convert 'o' into an npy_intp interpreted as an - * index. Returns 1 if it was successful, 0 otherwise. Does - * not set an exception. - */ -static int -coerce_index(PyObject *o, npy_intp *v) -{ - *v = PyArray_PyIntAsIntp(o); - - if ((*v) == -1 && PyErr_Occurred()) { - PyErr_Clear(); - return 0; - } - return 1; -} - -/* - * This function converts one element of the indexing tuple - * into a step size and a number of steps, returning the - * starting index. Non-slices are signalled in 'n_steps', - * as NEWAXIS_INDEX, ELLIPSIS_INDEX, or SINGLE_INDEX. - */ -NPY_NO_EXPORT npy_intp -parse_index_entry(PyObject *op, npy_intp *step_size, - npy_intp *n_steps, npy_intp max, - int axis, int check_index) -{ - npy_intp i; - - if (op == Py_None) { - *n_steps = NEWAXIS_INDEX; - i = 0; - } - else if (op == Py_Ellipsis) { - *n_steps = ELLIPSIS_INDEX; - i = 0; - } - else if (PySlice_Check(op)) { - npy_intp stop; - if (PySlice_GetIndicesEx(op, max, &i, &stop, step_size, n_steps) < 0) { - goto fail; - } - if (*n_steps <= 0) { - *n_steps = 0; - *step_size = 1; - i = 0; - } - } - else if (coerce_index(op, &i)) { - *n_steps = SINGLE_INDEX; - *step_size = 0; - if (check_index) { - if (check_and_adjust_index(&i, max, axis, NULL) < 0) { - goto fail; - } - } - } - else { - PyErr_SetString(PyExc_IndexError, - "each index entry must be either a " - "slice, an integer, Ellipsis, or " - "newaxis"); - goto fail; - } - return i; - - fail: - return -1; -} - - /*********************** Element-wise Array Iterator ***********************/ /* Aided by Peter J. 
Verveer's nd_image package and numpy's arraymap ****/ /* and Python's array iterator ***/ @@ -426,7 +355,7 @@ iter_length(PyArrayIterObject *self) } -static PyArrayObject * +static PyObject * iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, NPY_cast_info *cast_info) { @@ -483,7 +412,7 @@ iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, } PyArray_ITER_RESET(self); } - return ret; + return (PyObject *) ret; } static PyObject * @@ -561,195 +490,154 @@ iter_subscript_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT PyObject * iter_subscript(PyArrayIterObject *self, PyObject *ind) { - PyArray_Descr *indtype = NULL; - PyArray_Descr *dtype; - npy_intp start, step_size; - npy_intp n_steps; - PyArrayObject *ret; - char *dptr; - int size; - PyObject *obj = NULL; - PyObject *new; + PyObject *ret = NULL; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + PyArray_Descr *dtype = PyArray_DESCR(self->ao); + npy_intp dtype_size = dtype->elsize; NPY_cast_info cast_info = {.func = NULL}; - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - obj = iter_subscript(self, ind); - Py_DECREF(ind); - return obj; + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + return NULL; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { - goto fail; - } - if (len == 0) { - Py_INCREF(self->ao); - return (PyObject *)self->ao; - } - ind = PyTuple_GET_ITEM(ind, 0); + + else if (indices[0].type == HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - /* - * Tuples >1d not accepted --- i.e. no newaxis - * Could implement this with adjusted strides and dimensions in iterator - * Check for Boolean -- this is first because Bool is a subclass of Int - */ - PyArray_ITER_RESET(self); + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + ind = PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { + goto finish; + } - if (PyBool_Check(ind)) { - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - goto fail; + ret = iter_subscript(self, ind); + Py_DECREF(ind); + goto finish; + } + + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. 
(Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - return PyArray_ToScalar(self->dataptr, self->ao); + if (indices[0].value) { + ret = PyArray_ToScalar(self->dataptr, self->ao); + goto finish; } else { /* empty array */ npy_intp ii = 0; - dtype = PyArray_DESCR(self->ao); Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), - dtype, - 1, &ii, - NULL, NULL, 0, - (PyObject *)self->ao); - return (PyObject *)ret; + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), + dtype, + 1, &ii, + NULL, NULL, 0, + (PyObject *)self->ao); + goto finish; } } - dtype = PyArray_DESCR(self->ao); - size = dtype->elsize; + PyArray_ITER_RESET(self); - /* set up a cast to handle item copying */ + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { + goto finish; + } + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_ToScalar(self->dataptr, self->ao); + PyArray_ITER_RESET(self); + goto finish; + } + /* set up a cast to handle item copying */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; + /* We can assume the newly allocated output array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, size, size, dtype, dtype, 0, &cast_info, + is_aligned, dtype_size, dtype_size, dtype, dtype, 0, &cast_info, &transfer_flags) < 0) { - goto fail; + goto finish; } - /* Check for Integer or Slice */ - if (PyLong_Check(ind) || PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, - self->size, 0, 1); - if (start == -1) { - goto fail; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); - goto fail; + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { + goto finish; } + PyArray_ITER_GOTO1D(self, start); - if (n_steps == SINGLE_INDEX) { /* Integer */ - PyObject *tmp; - tmp = PyArray_ToScalar(self->dataptr, self->ao); - PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return tmp; - } Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), dtype, 1, &n_steps, NULL, NULL, 0, (PyObject *)self->ao); if (ret == NULL) { - goto fail; + goto finish; } - dptr = PyArray_DATA(ret); + + char *dptr = PyArray_DATA((PyArrayObject *) ret); while (n_steps--) { char *args[2] = {self->dataptr, dptr}; - npy_intp transfer_strides[2] = {size, size}; + npy_intp transfer_strides[2] = {dtype_size, dtype_size}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { - goto fail; + goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); - dptr += size; + dptr += dtype_size; } PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; + goto finish; } - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - if (obj == NULL) { - goto fail; - } - } - else { - Py_INCREF(ind); - obj = ind; + if (index_type == HAS_BOOL) { + ret = iter_subscript_Bool(self, (PyArrayObject *) indices[0].object, &cast_info); + goto finish; } - if (!PyArray_Check(obj)) { - PyArrayObject *tmp_arr = (PyArrayObject *) PyArray_FROM_O(obj); 
- if (tmp_arr == NULL) { - goto fail; - } - - if (PyArray_SIZE(tmp_arr) == 0) { - PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); - Py_SETREF(obj, PyArray_FromArray(tmp_arr, indtype, NPY_ARRAY_FORCECAST)); - Py_DECREF(tmp_arr); - if (obj == NULL) { - goto fail; - } - } - else { - Py_SETREF(obj, (PyObject *) tmp_arr); + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject *cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; } - } - - /* Check for Boolean array */ - if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) { - ret = iter_subscript_Bool(self, (PyArrayObject *)obj, &cast_info); + ret = iter_subscript_int(self, cast_array, &cast_info); + Py_DECREF(cast_array); goto finish; } - /* Only integer arrays left */ - if (!PyArray_ISINTEGER((PyArrayObject *)obj)) { - goto fail; - } - - Py_INCREF(indtype); - new = PyArray_FromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL); - if (new == NULL) { - goto fail; - } - ret = (PyArrayObject *)iter_subscript_int(self, (PyArrayObject *)new, - &cast_info); - Py_DECREF(new); - - finish: - Py_DECREF(indtype); - Py_DECREF(obj); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; - - fail: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); } - Py_XDECREF(indtype); - Py_XDECREF(obj); - NPY_cast_info_xfree(&cast_info); - - return NULL; - + return ret; } @@ -857,140 +745,132 @@ iter_ass_sub_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT int iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) { - PyArrayObject *arrval = NULL; - PyArrayIterObject *val_it = NULL; - PyArray_Descr *type; - PyArray_Descr *indtype = NULL; - int retval = -1; - npy_intp start, step_size; - npy_intp n_steps; - PyObject *obj = NULL; - NPY_cast_info cast_info = {.func = NULL}; - if (val == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete iterator elements"); return -1; } - if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) + if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) { return -1; + } - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - retval = iter_ass_subscript(self, ind, val); - Py_DECREF(ind); - return retval; + int ret = -1; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + PyArray_Descr *dtype = PyArray_DESCR(self->ao); + npy_intp dtype_size = dtype->elsize; + NPY_cast_info cast_info = {.func = NULL}; + + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + goto finish; + } + else if (indices[0].type == HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + if (PyTuple_Check(ind)) { + PyErr_SetString(PyExc_IndexError, "Assigning to a 
flat iterator with a 0-D index is not supported"); goto finish; } - ind = PyTuple_GET_ITEM(ind, 0); - } - type = PyArray_DESCR(self->ao); + ind = PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { + goto finish; + } - /* - * Check for Boolean -- this is first because - * Bool is a subclass of Int - */ - - if (PyBool_Check(ind)) { - retval = 0; - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - return -1; + ret = iter_ass_subscript(self, ind, val); + Py_DECREF(ind); + goto finish; + } + + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. (Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - retval = PyArray_Pack( - PyArray_DESCR(self->ao), self->dataptr, val); + ret = 0; + if (indices[0].value) { + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); } goto finish; } - if (PySequence_Check(ind) || PySlice_Check(ind)) { - goto skip; - } - start = PyArray_PyIntAsIntp(ind); - if (error_converting(start)) { - PyErr_Clear(); - } - else { - if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) { + PyArray_ITER_RESET(self); + + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { goto finish; } - PyArray_ITER_GOTO1D(self, start); - retval = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); PyArray_ITER_RESET(self); - if (retval < 0) { + if (ret < 0) { PyErr_SetString(PyExc_ValueError, "Error setting single item of array."); } goto finish; } - skip: - Py_INCREF(type); - arrval = (PyArrayObject *)PyArray_FromAny(val, type, 0, 0, + Py_INCREF(dtype); + PyArrayObject *arrval = (PyArrayObject *)PyArray_FromAny(val, dtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); if (arrval == NULL) { - return -1; + goto finish; } - val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); + PyArrayIterObject *val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); if (val_it == NULL) { goto finish; } if (val_it->size == 0) { - retval = 0; + ret = 0; goto finish; } /* set up cast to handle single-element copies into arrval */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; - int itemsize = type->elsize; /* We can assume the newly allocated array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, itemsize, itemsize, type, type, 0, + is_aligned, dtype_size, dtype_size, PyArray_DESCR(arrval), dtype, 0, &cast_info, &transfer_flags) < 0) { goto finish; } - /* Check Slice */ - if (PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, self->size, 0, 0); - if (start == -1) { - goto finish; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { goto finish; } + PyArray_ITER_GOTO1D(self, start); - npy_intp transfer_strides[2] = {itemsize, itemsize}; - if (n_steps == SINGLE_INDEX) { - char *args[2] = {PyArray_DATA(arrval), self->dataptr}; - if (cast_info.func(&cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { - goto finish; - } - PyArray_ITER_RESET(self); - retval = 0; - 
goto finish; - } + npy_intp transfer_strides[2] = {dtype_size, dtype_size}; while (n_steps--) { char *args[2] = {val_it->dataptr, self->dataptr}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); PyArray_ITER_NEXT(val_it); if (val_it->index == val_it->size) { @@ -998,60 +878,37 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) } } PyArray_ITER_RESET(self); - retval = 0; + ret = 0; goto finish; } - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - } - else { - Py_INCREF(ind); - obj = ind; + if (index_type == HAS_BOOL) { + ret = iter_ass_sub_Bool(self, (PyArrayObject *) indices[0].object, val_it, &cast_info); + goto finish; } - if (obj != NULL && PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE((PyArrayObject *)obj)==NPY_BOOL) { - if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - retval=0; - } - /* Check for integer array */ - else if (PyArray_ISINTEGER((PyArrayObject *)obj)) { - PyObject *new; - Py_INCREF(indtype); - new = PyArray_CheckFromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_BEHAVED_NS, NULL); - Py_DECREF(obj); - obj = new; - if (new == NULL) { - goto finish; - } - if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - retval = 0; + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject *cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; } + ret = iter_ass_sub_int(self, cast_array, val_it, &cast_info); + Py_DECREF(cast_array); + goto finish; } - finish: - if (!PyErr_Occurred() && retval < 0) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); - } - Py_XDECREF(indtype); - Py_XDECREF(obj); - Py_XDECREF(val_it); - Py_XDECREF(arrval); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: NPY_cast_info_xfree(&cast_info); - return retval; - + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); + } + return ret; } @@ -1476,6 +1333,7 @@ PyArray_MultiIterNew(int n, ...) 
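/*
 * The hunks below make arraymultiter_new safe under free-threaded CPython:
 * the raw item pointers returned by PySequence_Fast_ITEMS are only stable
 * while the input sequence cannot be mutated, so they are consumed inside a
 * critical section keyed on the original object. A minimal sketch of the
 * pattern, assuming the macro pair from npy_pycompat.h expands to CPython
 * critical sections on free-threaded builds and to no-ops under the GIL;
 * the function itself is hypothetical.
 */
static PyObject *
fast_sequence_length(PyObject *args)
{
    Py_ssize_t n = -1;
    PyObject *fast_seq = PySequence_Fast(args, "");  /* list/tuple view */
    if (fast_seq == NULL) {
        return NULL;
    }
    NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args)
    /* item pointers are only valid while the section is held */
    n = PySequence_Fast_GET_SIZE(fast_seq);
    Py_DECREF(fast_seq);
    NPY_END_CRITICAL_SECTION_SEQUENCE_FAST()
    return PyLong_FromSsize_t(n);
}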
return multiiter_new_impl(n, args_impl); } + static PyObject* arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *kwds) @@ -1488,18 +1346,19 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, "keyword arguments not accepted."); return NULL; } - - fast_seq = PySequence_Fast(args, ""); // needed for pypy + fast_seq = PySequence_Fast(args, ""); // noqa: borrowed-ref OK if (fast_seq == NULL) { return NULL; } + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args) n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { - Py_DECREF(fast_seq); - return multiiter_wrong_number_of_args(); + ret = multiiter_wrong_number_of_args(); + } else { + ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); } - ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); Py_DECREF(fast_seq); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() return ret; } diff --git a/numpy/_core/src/multiarray/legacy_dtype_implementation.c b/numpy/_core/src/multiarray/legacy_dtype_implementation.c index 70b4fa1e49db..eee7ce492fab 100644 --- a/numpy/_core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/_core/src/multiarray/legacy_dtype_implementation.c @@ -320,8 +320,8 @@ can_cast_fields(PyObject *field1, PyObject *field2, NPY_CASTING casting) /* Iterate over all the fields and compare for castability */ ppos = 0; - while (PyDict_Next(field1, &ppos, &key, &tuple1)) { - if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { + while (PyDict_Next(field1, &ppos, &key, &tuple1)) { // noqa: borrowed-ref OK + if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { // noqa: borrowed-ref OK return 0; } /* Compare the dtype of the field for castability */ @@ -367,12 +367,12 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * field; recurse just in case the single field is itself structured. */ if (!PyDataType_HASFIELDS(to) && !PyDataType_ISOBJECT(to)) { - if (casting == NPY_UNSAFE_CASTING && + if ((casting == NPY_UNSAFE_CASTING || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)) && PyDict_Size(lfrom->fields) == 1) { Py_ssize_t ppos = 0; PyObject *tuple; PyArray_Descr *field; - PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); + PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); // noqa: borrowed-ref OK field = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); /* * For a subarray, we need to get the underlying type; @@ -399,7 +399,7 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * casting; this is not correct, but needed since the treatment in can_cast * below got out of sync with astype; see gh-13667. */ - if (casting == NPY_UNSAFE_CASTING) { + if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { return 1; } } @@ -408,14 +408,14 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * If "from" is a simple data type and "to" has fields, then only * unsafe casting works (and that works always, even to multiple fields). */ - return casting == NPY_UNSAFE_CASTING; + return (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0); } /* * Everything else we consider castable for unsafe for now. * FIXME: ensure what we do here is consistent with "astype", * i.e., deal more correctly with subarrays and user-defined dtype. 
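 */

/*
 * Same-value casting is carried as an extra flag bit on top of the base
 * NPY_CASTING value, which is why the checks in this file test the bit with
 * `&` instead of comparing for equality. A hypothetical helper consolidating
 * the condition repeated throughout this function:
 */
static inline int
casting_allows_any_value_change(NPY_CASTING casting)
{
    /* same-value casting is as permissive as unsafe at the can-cast level;
     * the per-element value checks happen later, inside the cast loops */
    return (casting == NPY_UNSAFE_CASTING
            || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0);
}

/*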
*/ - else if (casting == NPY_UNSAFE_CASTING) { + else if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { return 1; } /* diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 01ffd225274f..050207ea188c 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -17,6 +17,7 @@ #include #include #include +#include #include "lowlevel_strided_loops.h" #include "array_assign.h" @@ -24,6 +25,7 @@ #include "usertypes.h" #include "umathmodule.h" +#include "gil_utils.h" /* * x86 platform works with unaligned access but the compiler is allowed to @@ -33,11 +35,7 @@ * instructions (16 byte). * So this flag can only be enabled if autovectorization is disabled. */ -#if NPY_ALIGNMENT_REQUIRED -# define NPY_USE_UNALIGNED_ACCESS 0 -#else -# define NPY_USE_UNALIGNED_ACCESS 0 -#endif +#define NPY_USE_UNALIGNED_ACCESS 0 #define _NPY_NOP1(x) (x) #define _NPY_NOP2(x) (x) @@ -718,6 +716,59 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * typedef npy_half _npy_half; #endif +#if EMULATED_FP16 +/* half-to-double, copied from CPP to allow inlining */ + +static NPY_GCC_OPT_3 +uint64_t ToDoubleBits(uint16_t h) +{ + uint16_t h_exp = (h&0x7c00u); + uint64_t d_sgn = ((uint64_t)h&0x8000u) << 48; + switch (h_exp) { + case 0x0000u: { // 0 or subnormal + uint16_t h_sig = (h&0x03ffu); + // Signed zero + if (h_sig == 0) { + return d_sgn; + } + // Subnormal + h_sig <<= 1; + while ((h_sig&0x0400u) == 0) { + h_sig <<= 1; + h_exp++; + } + uint64_t d_exp = ((uint64_t)(1023 - 15 - h_exp)) << 52; + uint64_t d_sig = ((uint64_t)(h_sig&0x03ffu)) << 42; + return d_sgn + d_exp + d_sig; + } + case 0x7c00u: // inf or NaN + // All-ones exponent and a copy of the significand + return d_sgn + 0x7ff0000000000000ULL + (((uint64_t)(h&0x03ffu)) << 42); + default: // normalized + // Just need to adjust the exponent and shift + return d_sgn + (((uint64_t)(h&0x7fffu) + 0xfc000u) << 42); + } +} + +NPY_FINLINE +npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ + /* + * Use npymath versions for all the special cases, only inline the + * x86_64 non-intrinsic case. 
Someday we will rewrite this in CPP and + * can then explore inlining more + */ + #if defined(NPY_HAVE_AVX512FP16) + return npy_halfbits_to_doublebits(h); + #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) + return npy_halfbits_to_doublebits(h); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return npy_halfbits_to_doublebits(h); + #else + return ToDoubleBits(h); + #endif +} +#endif + /**begin repeat * * #NAME1 = BOOL, @@ -746,6 +797,7 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * #is_float1 = 0*12, 1, 0, 0, 1, 0, 0# * #is_double1 = 0*13, 1, 0, 0, 1, 0# * #is_complex1 = 0*15, 1*3# + * #is_unsigned1 = 1*6, 0*12# */ /**begin repeat1 @@ -770,6 +822,16 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * npy_byte, npy_short, npy_int, npy_long, npy_longlong, * _npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# + * #type2max = 0, + * UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, ULLONG_MAX, + * SCHAR_MAX, SHRT_MAX, INT_MAX, LONG_MAX, LLONG_MAX, + * 65500.0f, FLT_MAX, DBL_MAX, LDBL_MAX, + * FLT_MAX, DBL_MAX, LDBL_MAX# + * #type2min = 0, + * 0, 0, 0, 0, 0, + * SCHAR_MIN, SHRT_MIN, INT_MIN, LONG_MIN, LLONG_MIN, + * -65500.0f, -FLT_MAX, -DBL_MAX, -LDBL_MAX, + * -FLT_MAX, -DBL_MAX, -LDBL_MAX# * #is_bool2 = 1, 0*17# * #is_emu_half2 = 0*11, EMULATED_FP16, 0*6# * #is_native_half2 = 0*11, NATIVE_FP16, 0*6# @@ -786,7 +848,10 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #if !(NPY_USE_UNALIGNED_ACCESS && !@aligned@) -/* For emulated half types, don't use actual double/float types in conversion */ +/* + * For emulated half types, don't use actual double/float types in conversion + * except for *_check_same_value_*(), follow _ROUND_TRIP and _TO_RTYPE1. + */ #if @is_emu_half1@ || @is_emu_half2@ # if @is_float1@ @@ -814,47 +879,58 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * /* Determine an appropriate casting conversion function */ #if @is_emu_half1@ - +# define _TO_RTYPE1(x) npy_half_to_float(x) # if @is_float2@ # define _CONVERT_FN(x) npy_halfbits_to_floatbits(x) +# define _ROUND_TRIP(x) npy_floatbits_to_halfbits(_CONVERT_FN(x)) # elif @is_double2@ -# define _CONVERT_FN(x) npy_halfbits_to_doublebits(x) +# define _CONVERT_FN(x) _npy_halfbits_to_doublebits(x) +# define _ROUND_TRIP(x) npy_doublebits_to_halfbits(_CONVERT_FN(x)) # elif @is_emu_half2@ # define _CONVERT_FN(x) (x) +# define _ROUND_TRIP(x) (x) # elif @is_bool2@ # define _CONVERT_FN(x) ((npy_bool)!npy_half_iszero(x)) +# define _ROUND_TRIP(x) npy_float_to_half((float)(!npy_half_iszero(x))) # else # define _CONVERT_FN(x) ((_TYPE2)npy_half_to_float(x)) +# define _ROUND_TRIP(x) npy_float_to_half((float)_CONVERT_FN(x)) # endif #elif @is_emu_half2@ - +# define _TO_RTYPE1(x) (@rtype1@)(x) # if @is_float1@ # define _CONVERT_FN(x) npy_floatbits_to_halfbits(x) +# define _ROUND_TRIP(x) npy_half_to_float(npy_float_to_half(x)) # elif @is_double1@ # define _CONVERT_FN(x) npy_doublebits_to_halfbits(x) +# define _ROUND_TRIP(x) npy_half_to_double(npy_double_to_half(x)) # elif @is_emu_half1@ # define _CONVERT_FN(x) (x) +# define _ROUND_TRIP(x) (x) # elif @is_bool1@ # define _CONVERT_FN(x) npy_float_to_half((float)(x!=0)) +# define _ROUND_TRIP(x) (x) # else # define _CONVERT_FN(x) npy_float_to_half((float)x) +# define _ROUND_TRIP(x) ((@rtype1@)npy_half_to_float(_CONVERT_FN(x))) # endif #else - # if @is_bool2@ || @is_bool1@ # define _CONVERT_FN(x) ((npy_bool)(x != 0)) # else # define _CONVERT_FN(x) ((_TYPE2)x) # endif +# define _TO_RTYPE1(x) (@rtype1@)(x) +# define _ROUND_TRIP(x) _TO_RTYPE1(_CONVERT_FN(x)) #endif // Enable 
auto-vectorization for floating point casts with clang #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ - #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) #if __clang_major__ >= 12 _Pragma("clang fp exceptions(ignore)") #endif @@ -873,8 +949,111 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #define GCC_CAST_OPT_LEVEL NPY_GCC_OPT_3 #endif +#define _RETURN_SAME_VALUE_FAILURE \ + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@"); \ + return -1 + +#if !@is_bool2@ +/* + * Check various modes of failure to accurately cast src_value to dst + */ +static GCC_CAST_OPT_LEVEL int +@prefix@_check_same_value_@name1@_to_@name2@(@rtype1@ *src_valueP) { + + @rtype1@ src_value = *src_valueP; + + /* 1. NaN/Infs always work for float to float and otherwise never */ +#if (@is_float1@ || @is_emu_half1@ || @is_double1@ || @is_native_half1@) + if (!npy_isfinite(_TO_RTYPE1(src_value))) { +# if (@is_float2@ || @is_emu_half2@ || @is_double2@ || @is_native_half2@) + return 0; /* float to float can preserve NaN/Inf */ +# else + _RETURN_SAME_VALUE_FAILURE; /* cannot preserve NaN/Inf */ +# endif + } +#endif + /* + * 2. Check that the src does not overflow the dst. + * This is complicated by a warning that, for instance, int8 cannot + * overflow int64max + */ +# ifdef __GNUC__ +# pragma GCC diagnostic push +# ifdef __clang__ +# pragma GCC diagnostic ignored "-Wtautological-constant-out-of-range-compare" +# endif +# pragma GCC diagnostic ignored "-Wtautological-compare" +# endif +# if !@is_bool1@ + if (_TO_RTYPE1(src_value) > @type2max@) { + _RETURN_SAME_VALUE_FAILURE; + } +# if !@is_unsigned1@ + if (_TO_RTYPE1(src_value) < @type2min@) { + _RETURN_SAME_VALUE_FAILURE; + } +# endif +# endif /* !is_bool1 */ + /* 3. Check that the value can round trip exactly */ + if (src_value != _ROUND_TRIP(src_value)) { + _RETURN_SAME_VALUE_FAILURE; + } +# ifdef __GNUC__ +# pragma GCC diagnostic pop +# endif /* __GNUC__ */ + return 0; +} +#endif + +/* + * Use a declaration instead of moving the function definition to here to make reviewing + * easier. 
TODO: move the repeat3 up here instead of these declarations + */ + +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_no_same_value( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *NPY_UNUSED(data)); + +#if !@is_bool2@ +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_same_value( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *NPY_UNUSED(data)); +#endif + +/* + * This is the entry point function called outside this file + */ + static GCC_CAST_OPT_LEVEL int @prefix@_cast_@name1@_to_@name2@( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *data) +{ +#if !@is_bool2@ + int same_value_casting = ((context->flags & NPY_SAME_VALUE_CONTEXT_FLAG) == NPY_SAME_VALUE_CONTEXT_FLAG); + if (same_value_casting) { + return @prefix@_cast_@name1@_to_@name2@_same_value(context, args, dimensions, strides, data); + } else { +#else + { +#endif + return @prefix@_cast_@name1@_to_@name2@_no_same_value(context, args, dimensions, strides, data); +}} + +/**begin repeat3 + * #func_name = no_same_value,same_value# + * #same_value = 0,1# + */ + + +#if !(@is_bool2@ && @same_value@) +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_@func_name@( PyArrayMethod_Context *context, char *const *args, const npy_intp *dimensions, const npy_intp *strides, NpyAuxData *NPY_UNUSED(data)) @@ -902,7 +1081,7 @@ static GCC_CAST_OPT_LEVEL int assert(N == 0 || npy_is_aligned(dst, NPY_ALIGNOF(_TYPE2))); #endif - /*printf("@prefix@_cast_@name1@_to_@name2@\n");*/ + /* printf("@prefix@_cast_@name1@_to_@name2@_@func_name@, N=%ld\n", N); */ while (N--) { #if @aligned@ @@ -919,31 +1098,81 @@ static GCC_CAST_OPT_LEVEL int # if @is_complex2@ dst_value[0] = _CONVERT_FN(src_value[0]); dst_value[1] = _CONVERT_FN(src_value[1]); -# elif !@aligned@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[1]) < 0) { + return -1; + } +# endif //same_value +# elif !@aligned@ # if @is_bool2@ dst_value = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]); # else dst_value = _CONVERT_FN(src_value[0]); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (src_value[1] != 0) { + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@: imag is not 0"); + return -1; + } +# endif //same_value # endif # else # if @is_bool2@ *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]); # else *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (src_value[1] != 0) { + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@: imag is not 0"); + return -1; + } +# endif //same_value # endif # endif -#else +#else // @is_complex1@ # if @is_complex2@ # if !@aligned@ dst_value[0] = _CONVERT_FN(src_value); -# else +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value) < 0) { + return -1; + } +# endif //same_value +# else //!aligned dst_value[0] = _CONVERT_FN(*(_TYPE1 *)src); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)src) < 0) { + return -1; + } +# endif 
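/*
 * De-templated, the same-value check injected by the repeated blocks above
 * reduces to a range test plus an exact round trip (plus NaN/Inf handling
 * for floating sources). For integer targets the range test already implies
 * the round trip; the round-trip test is what catches precision loss in the
 * float conversions. A concrete, hypothetical instance for int64 -> int8,
 * independent of the template machinery:
 */
#include <stdint.h>

static int
same_value_int64_to_int8(int64_t v)
{
    if (v > INT8_MAX || v < INT8_MIN) {
        return -1;                 /* would overflow the target type */
    }
    if ((int64_t)(int8_t)v != v) {
        return -1;                 /* value does not survive the round trip */
    }
    return 0;                      /* cast preserves the exact value */
}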
//same_value # endif dst_value[1] = 0; # elif !@aligned@ dst_value = _CONVERT_FN(src_value); +# if !@is_bool2@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value) < 0) { + return -1; + } +# endif //same_value +# endif // @is_bool2@ # else *(_TYPE2 *)dst = _CONVERT_FN(*(_TYPE1 *)src); +# if !@is_bool2@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(((@rtype1@ *)src)) < 0) { + return -1; + } +# endif //same_value +# endif // @is_bool2@ # endif #endif @@ -966,10 +1195,13 @@ static GCC_CAST_OPT_LEVEL int } return 0; } +#endif // !@is_bool2@ + +/**end repeat3**/ #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ - #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) #if __clang_major__ >= 12 _Pragma("clang fp exceptions(strict)") #endif @@ -981,6 +1213,9 @@ static GCC_CAST_OPT_LEVEL int #undef _CONVERT_FN #undef _TYPE2 #undef _TYPE1 +#undef _TO_RTYPE1 +#undef _ROUND_TRIP +#undef _RETURN_SAME_VALUE_FAILURE #endif diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 7953e32fcbf0..d6128f74621a 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -33,23 +33,6 @@ #include "umathmodule.h" -#define HAS_INTEGER 1 -#define HAS_NEWAXIS 2 -#define HAS_SLICE 4 -#define HAS_ELLIPSIS 8 -/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ -#define HAS_FANCY 16 -#define HAS_BOOL 32 -/* NOTE: Only set if it is neither fancy nor purely integer index! */ -#define HAS_SCALAR_ARRAY 64 -/* - * Indicate that this is a fancy index that comes from a 0d boolean. - * This means that the index does not operate along a real axis. The - * corresponding index type is just HAS_FANCY. - */ -#define HAS_0D_BOOL (HAS_FANCY | 128) - - static int _nonzero_indices(PyObject *myBool, PyArrayObject **arrays); @@ -263,20 +246,22 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n) * * Checks everything but the bounds. * - * @param self the array being indexed + * @param array_ndims The number of dimensions of the array being indexed (1 for iterators) + * @param array_dims The dimensions of the array being indexed (self->size for iterators) * @param index the index object * @param indices index info struct being filled (size of NPY_MAXDIMS * 2 + 1) * @param num number of indices found * @param ndim dimension of the indexing result * @param out_fancy_ndim dimension of the fancy/advanced indices part * @param allow_boolean whether to allow the boolean special case + * @param is_flatiter_object Whether the object indexed is an iterator * * @returns the index_type or -1 on failure and fills the number of indices. */ NPY_NO_EXPORT int -prepare_index(PyArrayObject *self, PyObject *index, - npy_index_info *indices, - int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object) { int new_ndim, fancy_ndim, used_ndim, index_ndim; int curr_idx, get_idx; @@ -314,8 +299,8 @@ prepare_index(PyArrayObject *self, PyObject *index, while (get_idx < index_ndim) { if (curr_idx > NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? 
"flat iterator" : "array"); goto failed_building_indices; } @@ -377,7 +362,7 @@ prepare_index(PyArrayObject *self, PyObject *index, * Since this is always an error if it was not a boolean, we can * allow the 0-d special case before the rest. */ - else if (PyArray_NDIM(self) != 0) { + else if (array_ndims != 0) { /* * Single integer index, there are two cases here. * It could be an array, a 0-d array is handled @@ -418,17 +403,55 @@ prepare_index(PyArrayObject *self, PyObject *index, goto failed_building_indices; } + // We raise here because we changed the behavior for boolean + // indices for flat iterators from being handled as integers + // to being regular boolean indices. + // TODO: This should go away fairly soon and lists of booleans + // should be handled as regular boolean indices. + if (is_flatiter_object && PyArray_ISBOOL(tmp_arr) && !PyBool_Check(index)) { + Py_DECREF(tmp_arr); + PyErr_Format(PyExc_IndexError, + "boolean indices for iterators are not supported because " + "of previous behavior that was confusing (valid boolean " + "indices are expected to work in the future)" + ); + goto failed_building_indices; + } + /* * For example an empty list can be cast to an integer array, * however it will default to a float one. */ - if (PyArray_SIZE(tmp_arr) == 0) { - PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + if (PyArray_SIZE(tmp_arr) == 0 + || (is_flatiter_object && !PyArray_ISINTEGER(tmp_arr) && !PyArray_ISBOOL(tmp_arr))) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); arr = (PyArrayObject *)PyArray_FromArray(tmp_arr, indtype, NPY_ARRAY_FORCECAST); + + // If the cast succeeded (which means that the previous flat iterator + // indexing routine would have succeeded as well), we need to issue a + // deprecation warning. + if (arr + && is_flatiter_object + && PyArray_SIZE(tmp_arr) != 0 + && !PyArray_ISINTEGER(tmp_arr) + && !PyArray_ISBOOL(tmp_arr) + && DEPRECATE("Invalid non-array indices for iterator objects are deprecated and will be " + "removed in a future version. (Deprecated NumPy 2.4)") < 0) { + Py_DECREF(tmp_arr); + goto failed_building_indices; + } Py_DECREF(tmp_arr); if (arr == NULL) { + // Raise a helpful error if this was a ValueError (i.e. could not cast) + if (PyErr_ExceptionMatches(PyExc_ValueError)) { + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); + } goto failed_building_indices; } } @@ -458,9 +481,9 @@ prepare_index(PyArrayObject *self, PyObject *index, * this is always an error. The check ensures that these errors are raised * and match those of the generic path. 
*/ - if ((PyArray_NDIM(arr) == PyArray_NDIM(self)) + if ((PyArray_NDIM(arr) == array_ndims) && PyArray_CompareLists(PyArray_DIMS(arr), - PyArray_DIMS(self), + array_dims, PyArray_NDIM(arr))) { index_type = HAS_BOOL; @@ -468,8 +491,8 @@ prepare_index(PyArrayObject *self, PyObject *index, indices[curr_idx].object = (PyObject *)arr; /* keep track anyway, just to be complete */ - used_ndim = PyArray_NDIM(self); - fancy_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; + fancy_ndim = array_ndims; curr_idx += 1; break; } @@ -524,8 +547,8 @@ prepare_index(PyArrayObject *self, PyObject *index, /* Check that we will not run out of indices to store new ones */ if (curr_idx + n >= NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? "flat iterator" : "array"); for (i=0; i < n; i++) { Py_DECREF(nonzero_result[i]); } @@ -603,10 +626,11 @@ prepare_index(PyArrayObject *self, PyObject *index, } else { /* The input was not an array, so give a general error message */ - PyErr_SetString(PyExc_IndexError, - "only integers, slices (`:`), ellipsis (`...`), " - "numpy.newaxis (`None`) and integer or boolean " - "arrays are valid indices"); + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); } Py_DECREF(arr); goto failed_building_indices; @@ -616,10 +640,10 @@ prepare_index(PyArrayObject *self, PyObject *index, * Compare dimension of the index to the real ndim. this is * to find the ellipsis value or append an ellipsis if necessary. */ - if (used_ndim < PyArray_NDIM(self)) { + if (used_ndim < array_ndims) { if (index_type & HAS_ELLIPSIS) { - indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim; - used_ndim = PyArray_NDIM(self); + indices[ellipsis_pos].value = array_ndims - used_ndim; + used_ndim = array_ndims; new_ndim += indices[ellipsis_pos].value; } else { @@ -630,19 +654,21 @@ prepare_index(PyArrayObject *self, PyObject *index, index_type |= HAS_ELLIPSIS; indices[curr_idx].object = NULL; indices[curr_idx].type = HAS_ELLIPSIS; - indices[curr_idx].value = PyArray_NDIM(self) - used_ndim; + indices[curr_idx].value = array_ndims - used_ndim; ellipsis_pos = curr_idx; - used_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; new_ndim += indices[curr_idx].value; curr_idx += 1; } } - else if (used_ndim > PyArray_NDIM(self)) { + else if (used_ndim > array_ndims) { PyErr_Format(PyExc_IndexError, - "too many indices for array: " - "array is %d-dimensional, but %d were indexed", - PyArray_NDIM(self), + "too many indices for %s: " + "%s is %d-dimensional, but %d were indexed", + is_flatiter_object ? "flat iterator" : "array", + is_flatiter_object ? "flat iterator" : "array", + array_ndims, used_ndim); goto failed_building_indices; } @@ -697,14 +723,15 @@ prepare_index(PyArrayObject *self, PyObject *index, used_ndim = 0; for (i = 0; i < curr_idx; i++) { if ((indices[i].type == HAS_FANCY) && indices[i].value > 0) { - if (indices[i].value != PyArray_DIM(self, used_ndim)) { + if (indices[i].value != array_dims[used_ndim]) { char err_msg[174]; PyOS_snprintf(err_msg, sizeof(err_msg), - "boolean index did not match indexed array along " + "boolean index did not match indexed %s along " "axis %d; size of axis is %" NPY_INTP_FMT " but size of corresponding boolean axis is %" NPY_INTP_FMT, - used_ndim, PyArray_DIM(self, used_ndim), + is_flatiter_object ? 
"flat iterator" : "array", + used_ndim, array_dims[used_ndim], indices[i].value); PyErr_SetString(PyExc_IndexError, err_msg); goto failed_building_indices; @@ -740,6 +767,16 @@ prepare_index(PyArrayObject *self, PyObject *index, return -1; } +NPY_NO_EXPORT int +prepare_index(PyArrayObject *self, PyObject *index, + npy_index_info *indices, + int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +{ + return prepare_index_noarray(PyArray_NDIM(self), PyArray_DIMS(self), + index, indices, num, ndim, out_fancy_ndim, + allow_boolean, 0); +} + /** * Check if self has memory overlap with one of the index arrays, or with extra_op. @@ -1136,6 +1173,8 @@ array_assign_boolean_subscript(PyArrayObject *self, } else { v_stride = 0; + /* If the same value is repeated, iteration order does not matter */ + order = NPY_KEEPORDER; } v_data = PyArray_DATA(v); @@ -1351,7 +1390,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) npy_intp offset; /* get the field offset and dtype */ - tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); + tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); // noqa: borrowed-ref OK if (tup == NULL && PyErr_Occurred()) { return 0; } @@ -2110,8 +2149,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) /* May need a generic copy function (only for refs and odd sizes) */ NPY_ARRAYMETHOD_FLAGS transfer_flags; npy_intp itemsize = PyArray_ITEMSIZE(self); - // TODO: the heuristic used here to determine the src_dtype might be subtly wrong - // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. if (PyArray_GetDTypeTransferFunction( 1, itemsize, itemsize, descr, PyArray_DESCR(self), @@ -2134,7 +2171,8 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) * Could add a casting check, but apparently most assignments do * not care about safe casting. */ - if (mapiter_set(mit, &cast_info, meth_flags, is_aligned) < 0) { + int result = mapiter_set(mit, &cast_info, meth_flags, is_aligned); + if (result < 0) { goto fail; } @@ -3013,6 +3051,8 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, if (extra_op == NULL) { goto fail; } + // extra_op_dtype might have been replaced, so get a new reference + extra_op_dtype = PyArray_DESCR(extra_op); } /* diff --git a/numpy/_core/src/multiarray/mapping.h b/numpy/_core/src/multiarray/mapping.h index 528cb6604892..d4577c78fdbb 100644 --- a/numpy/_core/src/multiarray/mapping.h +++ b/numpy/_core/src/multiarray/mapping.h @@ -3,6 +3,23 @@ extern NPY_NO_EXPORT PyMappingMethods array_as_mapping; +/* Indexing types */ +#define HAS_INTEGER 1 +#define HAS_NEWAXIS 2 +#define HAS_SLICE 4 +#define HAS_ELLIPSIS 8 +/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ +#define HAS_FANCY 16 +#define HAS_BOOL 32 +/* NOTE: Only set if it is neither fancy nor purely integer index! */ +#define HAS_SCALAR_ARRAY 64 +/* + * Indicate that this is a fancy index that comes from a 0d boolean. + * This means that the index does not operate along a real axis. The + * corresponding index type is just HAS_FANCY. + */ +#define HAS_0D_BOOL (HAS_FANCY | 128) + /* * Object to store information needed for advanced (also fancy) indexing. 
@@ -113,6 +130,11 @@ typedef struct { } npy_index_info; +NPY_NO_EXPORT int +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object); + NPY_NO_EXPORT Py_ssize_t array_length(PyArrayObject *self); diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 58a554dc40be..6dcc349dcd03 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -752,6 +752,10 @@ array_toscalar(PyArrayObject *self, PyObject *args) return PyArray_MultiIndexGetItem(self, multi_index); } + +NPY_NO_EXPORT int +PyArray_CastingConverterSameValue(PyObject *obj, NPY_CASTING *casting); + static PyObject * array_astype(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) @@ -770,7 +774,7 @@ array_astype(PyArrayObject *self, if (npy_parse_arguments("astype", args, len_args, kwnames, "dtype", &PyArray_DTypeOrDescrConverterRequired, &dt_info, "|order", &PyArray_OrderConverter, &order, - "|casting", &PyArray_CastingConverter, &casting, + "|casting", &PyArray_CastingConverterSameValue, &casting, "|subok", &PyArray_PythonPyIntFromInt, &subok, "|copy", &PyArray_AsTypeCopyConverter, &forcecopy, NULL, NULL, NULL) < 0) { @@ -840,7 +844,12 @@ array_astype(PyArrayObject *self, ((PyArrayObject_fields *)ret)->nd = PyArray_NDIM(self); ((PyArrayObject_fields *)ret)->descr = dtype; } - int success = PyArray_CopyInto(ret, self); + int success; + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + success = PyArray_AssignArray(ret, self, NULL, casting); + } else { + success = PyArray_AssignArray(ret, self, NULL, NPY_UNSAFE_CASTING); + } Py_DECREF(dtype); ((PyArrayObject_fields *)ret)->nd = out_ndim; @@ -934,6 +943,11 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } + if (newtype == NULL) { + newtype = PyArray_DESCR(self); + Py_INCREF(newtype); // newtype is owned. + } + /* convert to PyArray_Type */ if (!PyArray_CheckExact(self)) { PyArrayObject *new; @@ -951,6 +965,7 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) (PyObject *)self ); if (new == NULL) { + Py_DECREF(newtype); return NULL; } self = new; @@ -960,22 +975,21 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) } if (copy == NPY_COPY_ALWAYS) { - if (newtype == NULL) { - newtype = PyArray_DESCR(self); - } - ret = PyArray_CastToType(self, newtype, 0); + ret = PyArray_CastToType(self, newtype, 0); // steals newtype reference Py_DECREF(self); return ret; } else { // copy == NPY_COPY_IF_NEEDED || copy == NPY_COPY_NEVER - if (newtype == NULL || PyArray_EquivTypes(PyArray_DESCR(self), newtype)) { + if (PyArray_EquivTypes(PyArray_DESCR(self), newtype)) { + Py_DECREF(newtype); return (PyObject *)self; } if (copy == NPY_COPY_IF_NEEDED) { - ret = PyArray_CastToType(self, newtype, 0); + ret = PyArray_CastToType(self, newtype, 0); // steals newtype reference. 
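The `NPY_SAME_VALUE_CASTING_FLAG` branch routes `astype` through `PyArray_AssignArray` so the cast can be value-checked. The Python-level spelling below is an assumption for illustration; this hunk only shows the C plumbing:

    a = np.array([1, 300])
    a.astype(np.int8, casting="same_value")  # assumed spelling; would raise because
                                             # 300 does not survive the cast to int8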
Py_DECREF(self); return ret; } else { // copy == NPY_COPY_NEVER PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); + Py_DECREF(newtype); Py_DECREF(self); return NULL; } @@ -997,26 +1011,26 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) int i; int nin, nout; PyObject *out_kwd_obj; - PyObject *fast; - PyObject **in_objs, **out_objs, *where_obj; + PyObject **out_objs, *where_obj; /* check inputs */ nin = PyTuple_Size(args); if (nin < 0) { return -1; } - fast = PySequence_Fast(args, "Could not convert object to sequence"); - if (fast == NULL) { - return -1; - } - in_objs = PySequence_Fast_ITEMS(fast); for (i = 0; i < nin; ++i) { - if (PyUFunc_HasOverride(in_objs[i])) { - Py_DECREF(fast); +#if defined(PYPY_VERSION) || defined(Py_LIMITED_API) + PyObject *obj = PyTuple_GetItem(args, i); + if (obj == NULL) { + return -1; + } +#else + PyObject *obj = PyTuple_GET_ITEM(args, i); +#endif + if (PyUFunc_HasOverride(obj)) { return 1; } } - Py_DECREF(fast); if (kwds == NULL) { return 0; } @@ -1033,7 +1047,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } Py_DECREF(out_kwd_obj); /* check where if it exists */ - where_obj = PyDict_GetItemWithError(kwds, npy_interned_str.where); + where_obj = PyDict_GetItemWithError(kwds, npy_interned_str.where); // noqa: borrowed-ref OK if (where_obj == NULL) { if (PyErr_Occurred()) { return -1; @@ -1115,14 +1129,15 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); return NULL; } - types = PySequence_Fast( + types = PySequence_Fast( // noqa: borrowed-ref OK types, "types argument to ndarray.__array_function__ must be iterable"); if (types == NULL) { return NULL; } - + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(types); result = array_function_method_impl(func, types, args, kwargs); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); Py_DECREF(types); return result; } @@ -1153,7 +1168,6 @@ array_copy_keeporder(PyArrayObject *self, PyObject *args) return PyArray_NewCopy(self, NPY_KEEPORDER); } -#include static PyObject * array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) { @@ -1161,8 +1175,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) Py_ssize_t size = PyTuple_Size(args); int refcheck = 1; PyArray_Dims newshape; - PyObject *ret, *obj; - + PyObject *obj; if (!NpyArg_ParseKeywords(kwds, "|i", kwlist, &refcheck)) { return NULL; @@ -1185,12 +1198,11 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } - ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER); + int ret = PyArray_Resize_int(self, &newshape, refcheck); npy_free_cache_dim_obj(newshape); - if (ret == NULL) { + if (ret < 0) { return NULL; } - Py_DECREF(ret); Py_RETURN_NONE; } @@ -1249,11 +1261,12 @@ array_sort(PyArrayObject *self, { int axis = -1; int val; - NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; PyObject *order = NULL; PyArray_Descr *saved = NULL; PyArray_Descr *newd; + NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; int stable = -1; + int descending = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("sort", args, len_args, kwnames, @@ -1261,18 +1274,39 @@ array_sort(PyArrayObject *self, "|kind", &PyArray_SortkindConverter, &sortkind, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, +// "$descending", &PyArray_OptionalBoolConverter, &descending, NULL, NULL, NULL) < 0) { return NULL; } - if (order == Py_None) { - order = NULL; + + if (sortkind == _NPY_SORT_UNDEFINED) { + // keywords only if 
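For orientation, the copy modes handled in `array_getarray` map onto `ndarray.__array__` roughly as follows (illustrative):

    a = np.arange(3)
    a.__array__() is a                   # True: equivalent dtype, copy only if needed
    a.__array__(copy=True) is a          # False: NPY_COPY_ALWAYS forces a copy
    a.__array__(np.float32, copy=False)  # ValueError: NPY_COPY_NEVER, but the
                                         # requested cast would need a copy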
sortkind not passed + sortkind = 0; + sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; + sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; } + else { + // Check that no keywords are used + int keywords_used = 0; + keywords_used |= (stable != -1); + keywords_used |= (descending != -1); + if (keywords_used) { + PyErr_SetString(PyExc_ValueError, + "`kind` and keyword parameters can't be provided at " + "the same time. Use only one of them."); + return NULL; + } + } + + order = (order != Py_None)? order: NULL; + // Reorder field names if required. if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; saved = PyArray_DESCR(self); if (!PyDataType_HASFIELDS(saved)) { - PyErr_SetString(PyExc_ValueError, "Cannot specify " \ + PyErr_SetString(PyExc_ValueError, + "Cannot specify " "order when the array has no fields."); return NULL; } @@ -1295,20 +1329,9 @@ array_sort(PyArrayObject *self, ((_PyArray_LegacyDescr *)newd)->names = new_name; ((PyArrayObject_fields *)self)->descr = newd; } - if (sortkind != _NPY_SORT_UNDEFINED && stable != -1) { - PyErr_SetString(PyExc_ValueError, - "`kind` and `stable` parameters can't be provided at " - "the same time. Use only one of them."); - return NULL; - } - else if ((sortkind == _NPY_SORT_UNDEFINED && stable == -1) || (stable == 0)) { - sortkind = NPY_QUICKSORT; - } - else if (stable == 1) { - sortkind = NPY_STABLESORT; - } val = PyArray_Sort(self, axis, sortkind); + if (order != NULL) { Py_XDECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = saved; @@ -1319,6 +1342,7 @@ array_sort(PyArrayObject *self, Py_RETURN_NONE; } + static PyObject * array_partition(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) @@ -1392,15 +1416,19 @@ array_partition(PyArrayObject *self, Py_RETURN_NONE; } + static PyObject * array_argsort(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis = -1; + PyObject *res; + PyObject *order = NULL; + PyArray_Descr *saved = NULL; + PyArray_Descr *newd; NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; - PyObject *order = NULL, *res; - PyArray_Descr *newd, *saved=NULL; int stable = -1; + int descending = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argsort", args, len_args, kwnames, @@ -1408,12 +1436,32 @@ array_argsort(PyArrayObject *self, "|kind", &PyArray_SortkindConverter, &sortkind, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, +// "$descending", &PyArray_OptionalBoolConverter, &descending, NULL, NULL, NULL) < 0) { return NULL; } - if (order == Py_None) { - order = NULL; + + if (sortkind == _NPY_SORT_UNDEFINED) { + // keywords only if sortkind not passed + sortkind = 0; + sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; + sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; } + else { + // Check that no keywords are used + int keywords_used = 0; + keywords_used |= (stable != -1); + keywords_used |= (descending != -1); + if (keywords_used) { + PyErr_SetString(PyExc_ValueError, + "`kind` and keyword parameters can't be provided at " + "the same time. Use only one of them."); + return NULL; + } + } + + // Reorder field names if required. + order = (order != Py_None)? 
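The user-visible effect of the reworked keyword handling in `array_sort` (mirrored in `array_argsort` below); note that `descending` is still commented out in the argument parser, so only `stable` is reachable:

    a = np.array([3, 1, 2])
    a.sort(stable=True)                 # sets the NPY_SORT_STABLE flag
    a.sort(kind="stable", stable=True)  # ValueError: `kind` and keyword parameters
                                        # can't be provided at the same time. ...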
order: NULL; if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; @@ -1442,20 +1490,9 @@ array_argsort(PyArrayObject *self, ((_PyArray_LegacyDescr *)newd)->names = new_name; ((PyArrayObject_fields *)self)->descr = newd; } - if (sortkind != _NPY_SORT_UNDEFINED && stable != -1) { - PyErr_SetString(PyExc_ValueError, - "`kind` and `stable` parameters can't be provided at " - "the same time. Use only one of them."); - return NULL; - } - else if ((sortkind == _NPY_SORT_UNDEFINED && stable == -1) || (stable == 0)) { - sortkind = NPY_QUICKSORT; - } - else if (stable == 1) { - sortkind = NPY_STABLESORT; - } res = PyArray_ArgSort(self, axis, sortkind); + if (order != NULL) { Py_XDECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = saved; @@ -1566,7 +1603,7 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, PyArray_Descr *new; int offset, res; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -1733,7 +1770,7 @@ _setlist_pkl(PyArrayObject *self, PyObject *list) return -1; } while(iter->index < iter->size) { - theobject = PyList_GET_ITEM(list, iter->index); + theobject = PyList_GET_ITEM(list, iter->index); // noqa: borrowed-ref OK setitem(theobject, iter->dataptr, self); PyArray_ITER_NEXT(iter); } @@ -2860,7 +2897,8 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for the sys module */ {"__sizeof__", (PyCFunction) array_sizeof, - METH_NOARGS, NULL}, + METH_NOARGS, + "__sizeof__($self, /)\n--\n\nSize in memory."}, /* for the copy module */ {"__copy__", @@ -2889,11 +2927,13 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { {"__complex__", (PyCFunction) array_complex, - METH_VARARGS, NULL}, + METH_VARARGS, + "__complex__($self, /)\n--\n\ncomplex(self)"}, {"__format__", (PyCFunction) array_format, - METH_VARARGS, NULL}, + METH_VARARGS, + "__format__($self, spec, /)\n--\n\nformat(self[, spec])"}, {"__class_getitem__", (PyCFunction)array_class_getitem, diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8ba38b555edb..8bede253a22f 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -43,6 +43,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "arraytypes.h" #include "arrayobject.h" #include "array_converter.h" +#include "blas_utils.h" #include "hashdescr.h" #include "descriptor.h" #include "dragon4.h" @@ -530,8 +531,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, NPY_NO_EXPORT PyArrayObject * PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, NPY_ORDER order, PyArrayObject *ret, - PyArray_Descr *dtype, NPY_CASTING casting, - npy_bool casting_not_passed) + PyArray_Descr *dtype, NPY_CASTING casting) { int iarrays; npy_intp shape = 0; @@ -647,12 +647,11 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, * @param ret output array to fill * @param dtype Forced output array dtype (cannot be combined with ret) * @param casting Casting mode used - * @param casting_not_passed Deprecation helper */ NPY_NO_EXPORT PyObject * PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret, PyArray_Descr *dtype, - NPY_CASTING casting, npy_bool casting_not_passed) + NPY_CASTING casting) { int iarrays, narrays; PyArrayObject **arrays; @@ -670,10 +669,17 @@ PyArray_ConcatenateInto(PyObject *op, } /* 
Convert the input list into arrays */ - narrays = PySequence_Size(op); - if (narrays < 0) { + Py_ssize_t narrays_true = PySequence_Size(op); + if (narrays_true < 0) { return NULL; } + else if (narrays_true > NPY_MAX_INT) { + PyErr_Format(PyExc_ValueError, + "concatenate() only supports up to %d arrays but got %zd.", + NPY_MAX_INT, narrays_true); + return NULL; + } + narrays = (int)narrays_true; arrays = PyArray_malloc(narrays * sizeof(arrays[0])); if (arrays == NULL) { PyErr_NoMemory(); @@ -698,7 +704,7 @@ PyArray_ConcatenateInto(PyObject *op, if (axis == NPY_RAVEL_AXIS) { ret = PyArray_ConcatenateFlattenedArrays( narrays, arrays, NPY_CORDER, ret, dtype, - casting, casting_not_passed); + casting); } else { ret = PyArray_ConcatenateArrays( @@ -743,7 +749,7 @@ PyArray_Concatenate(PyObject *op, int axis) casting = NPY_SAME_KIND_CASTING; } return PyArray_ConcatenateInto( - op, axis, NULL, NULL, casting, 0); + op, axis, NULL, NULL, casting); } static int @@ -1554,7 +1560,7 @@ _prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order) static inline PyObject * _array_fromobject_generic( PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, - NPY_COPYMODE copy, NPY_ORDER order, npy_bool subok, int ndmin) + NPY_COPYMODE copy, NPY_ORDER order, npy_bool subok, int ndmin, int ndmax) { PyArrayObject *oparr = NULL, *ret = NULL; PyArray_Descr *oldtype = NULL; @@ -1564,10 +1570,9 @@ _array_fromobject_generic( Py_XINCREF(in_descr); PyArray_Descr *dtype = in_descr; - if (ndmin > NPY_MAXDIMS) { + if (ndmin > ndmax) { PyErr_Format(PyExc_ValueError, - "ndmin bigger than allowable number of dimensions " - "NPY_MAXDIMS (=%d)", NPY_MAXDIMS); + "ndmin must be <= ndmax (%d)", ndmax); goto finish; } /* fast exit if simple call */ @@ -1676,7 +1681,7 @@ _array_fromobject_generic( flags |= NPY_ARRAY_FORCECAST; ret = (PyArrayObject *)PyArray_CheckFromAny_int( - op, dtype, in_DType, 0, 0, flags, NULL); + op, dtype, in_DType, 0, ndmax, flags, NULL); finish: Py_XDECREF(dtype); @@ -1707,6 +1712,7 @@ array_array(PyObject *NPY_UNUSED(ignored), npy_bool subok = NPY_FALSE; NPY_COPYMODE copy = NPY_COPY_ALWAYS; int ndmin = 0; + int ndmax = NPY_MAXDIMS; npy_dtype_info dt_info = {NULL, NULL}; NPY_ORDER order = NPY_KEEPORDER; PyObject *like = Py_None; @@ -1720,6 +1726,7 @@ array_array(PyObject *NPY_UNUSED(ignored), "$order", &PyArray_OrderConverter, &order, "$subok", &PyArray_BoolConverter, &subok, "$ndmin", &PyArray_PythonPyIntFromInt, &ndmin, + "$ndmax", &PyArray_PythonPyIntFromInt, &ndmax, "$like", NULL, &like, NULL, NULL, NULL) < 0) { Py_XDECREF(dt_info.descr); @@ -1741,8 +1748,15 @@ array_array(PyObject *NPY_UNUSED(ignored), op = args[0]; } + if (ndmax > NPY_MAXDIMS || ndmax < 0) { + PyErr_Format(PyExc_ValueError, "ndmax must be in the range [0, NPY_MAXDIMS (%d)] ", NPY_MAXDIMS); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); + return NULL; + } + PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, subok, ndmin); + op, dt_info.descr, dt_info.dtype, copy, order, subok, ndmin, ndmax); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1788,7 +1802,7 @@ array_asarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, NPY_FALSE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_FALSE, 0, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1834,7 +1848,7 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = 
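A sketch of the new `ndmax` keyword's validation as wired up above (assuming `NPY_MAXDIMS` is 64, its value in current NumPy):

    np.array([1, 2], ndmin=3, ndmax=2)  # ValueError: ndmin must be <= ndmax (2)
    np.array([1, 2], ndmax=65)          # ValueError: ndmax must be in the range
                                        # [0, NPY_MAXDIMS (64)]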
_array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1876,7 +1890,7 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), PyObject *res = _array_fromobject_generic( op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, NPY_CORDER, NPY_FALSE, - 1); + 1, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1918,7 +1932,7 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), PyObject *res = _array_fromobject_generic( op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, NPY_FORTRANORDER, - NPY_FALSE, 1); + NPY_FALSE, 1, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -2489,7 +2503,6 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *out = NULL; PyArray_Descr *dtype = NULL; NPY_CASTING casting = NPY_SAME_KIND_CASTING; - PyObject *casting_obj = NULL; PyObject *res; int axis = 0; @@ -2499,22 +2512,10 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), "|axis", &PyArray_AxisConverter, &axis, "|out", NULL, &out, "$dtype", &PyArray_DescrConverter2, &dtype, - "$casting", NULL, &casting_obj, + "$casting", &PyArray_CastingConverter, &casting, NULL, NULL, NULL) < 0) { return NULL; } - int casting_not_passed = 0; - if (casting_obj == NULL) { - /* - * Casting was not passed in, needed for deprecation only. - * This should be simplified once the deprecation is finished. - */ - casting_not_passed = 1; - } - else if (!PyArray_CastingConverter(casting_obj, &casting)) { - Py_XDECREF(dtype); - return NULL; - } if (out != NULL) { if (out == Py_None) { out = NULL; @@ -2526,7 +2527,7 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), } } res = PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out, dtype, - casting, casting_not_passed); + casting); Py_XDECREF(dtype); return res; } @@ -2767,32 +2768,33 @@ einsum_sub_op_from_str( static int einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) { - int ellipsis = 0, subindex = 0; + int ellipsis = 0, subindex = 0, ret = -1; npy_intp i, size; - PyObject *item; + PyObject *item, *seq; - obj = PySequence_Fast(obj, "the subscripts for each operand must " + seq = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK "be a list or a tuple"); - if (obj == NULL) { + if (seq == NULL) { return -1; } - size = PySequence_Size(obj); + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); + + size = PySequence_Size(seq); for (i = 0; i < size; ++i) { - item = PySequence_Fast_GET_ITEM(obj, i); + item = PySequence_Fast_GET_ITEM(seq, i); /* Ellipsis */ if (item == Py_Ellipsis) { if (ellipsis) { PyErr_SetString(PyExc_ValueError, "each subscripts list may have only one ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (subindex + 3 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } subscripts[subindex++] = '.'; subscripts[subindex++] = '.'; @@ -2807,16 +2809,14 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) PyErr_SetString(PyExc_TypeError, "each subscript must be either an integer " "or an ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } npy_bool bad_input = 0; if (subindex + 1 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (s < 0) { @@ -2835,16 +2835,19 @@ 
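`einsum_list_to_subscripts` backs einsum's interleaved operand/subscript-list calling convention; for orientation:

    a, b = np.ones((2, 3)), np.ones((3, 4))
    np.einsum(a, [0, 1], b, [1, 2], [0, 2])  # same as np.einsum('ij,jk->ik', a, b)
    np.einsum(a, [0, 1.5])                   # TypeError: each subscript must be
                                             # either an integer or an ellipsis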
einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) if (bad_input) { PyErr_SetString(PyExc_ValueError, "subscript is not within the valid range [0, 52)"); - Py_DECREF(obj); - return -1; + goto cleanup; } } - } - Py_DECREF(obj); + ret = subindex; + + cleanup:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + Py_DECREF(seq); - return subindex; + return ret; } /* @@ -4324,6 +4327,108 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), } +static PyObject * +_populate_finfo_constants(PyObject *NPY_UNUSED(self), PyObject *args) +{ + if (PyTuple_Size(args) != 2) { + PyErr_SetString(PyExc_TypeError, "Expected 2 arguments"); + return NULL; + } + PyObject *finfo = PyTuple_GetItem(args, 0); + if (finfo == NULL || finfo == Py_None) { + PyErr_SetString(PyExc_TypeError, "First argument cannot be None"); + return NULL; + } + PyArray_Descr *descr = (PyArray_Descr *)PyTuple_GetItem(args, 1); + if (!PyArray_DescrCheck(descr)) { + PyErr_SetString(PyExc_TypeError, "Second argument must be a dtype"); + return NULL; + } + + static const struct { + char *name; + int id; + npy_bool is_int; + } finfo_constants[] = { + {"max", NPY_CONSTANT_maximum_finite, 0}, + {"min", NPY_CONSTANT_minimum_finite, 0}, + {"_radix", NPY_CONSTANT_finfo_radix, 0}, + {"eps", NPY_CONSTANT_finfo_eps, 0}, + {"smallest_normal", NPY_CONSTANT_finfo_smallest_normal, 0}, + {"smallest_subnormal", NPY_CONSTANT_finfo_smallest_subnormal, 0}, + {"nmant", NPY_CONSTANT_finfo_nmant, 1}, + {"minexp", NPY_CONSTANT_finfo_min_exp, 1}, + {"maxexp", NPY_CONSTANT_finfo_max_exp, 1}, + {"precision", NPY_CONSTANT_finfo_decimal_digits, 1}, + }; + static const int n_finfo_constants = sizeof(finfo_constants) / sizeof(finfo_constants[0]); + + int n_float_constants = 0; + for (int i = 0; i < n_finfo_constants; i++) { + if (!finfo_constants[i].is_int) { + n_float_constants++; + } + } + + PyArrayObject *buffer_array = NULL; + char *buffer_data = NULL; + npy_intp dims[1] = {n_float_constants}; + + Py_INCREF(descr); + buffer_array = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, + descr, 1, dims, NULL, NULL, 0, NULL); + if (buffer_array == NULL) { + return NULL; + } + buffer_data = PyArray_BYTES(buffer_array); + npy_intp elsize = PyArray_DESCR(buffer_array)->elsize; + + for (int i = 0; i < n_finfo_constants; i++) + { + PyObject *value_obj; + if (!finfo_constants[i].is_int) { + int res = NPY_DT_CALL_get_constant(descr, + finfo_constants[i].id, buffer_data); + if (res < 0) { + goto fail; + } + if (res == 0) { + buffer_data += elsize; // Move to next element + continue; + } + // Return as 0-d array item to preserve numpy scalar type + value_obj = PyArray_ToScalar(buffer_data, buffer_array); + buffer_data += elsize; // Move to next element + } + else { + npy_intp int_value; + int res = NPY_DT_CALL_get_constant(descr, finfo_constants[i].id, &int_value); + if (res < 0) { + goto fail; + } + if (res == 0) { + continue; + } + value_obj = PyLong_FromSsize_t(int_value); + } + if (value_obj == NULL) { + goto fail; + } + int res = PyObject_SetAttrString(finfo, finfo_constants[i].name, value_obj); + Py_DECREF(value_obj); + if (res < 0) { + goto fail; + } + } + + Py_DECREF(buffer_array); + Py_RETURN_NONE; + fail: + Py_XDECREF(buffer_array); + return NULL; +} + + static PyObject * _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) { @@ -4342,6 +4447,25 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) } +static PyObject * +_blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *arg) { + if (arg == Py_None) { 
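`_populate_finfo_constants` fills a `finfo` instance with constants provided by the dtype itself; the names set above match the public attributes:

    fi = np.finfo(np.float32)
    fi.max, fi.min, fi.eps             # float constants, returned as NumPy scalars
    fi.nmant, fi.maxexp, fi.precision  # integer constants, returned as Python ints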
+ return PyBool_FromLong(npy_blas_supports_fpe()); + } + else if (arg == Py_True) { + return PyBool_FromLong(npy_set_blas_supports_fpe(true)); + } + else if (arg == Py_False) { + return PyBool_FromLong(npy_set_blas_supports_fpe(false)); + } + else { + PyErr_SetString(PyExc_TypeError, + "BLAS FPE support must be None, True, or False"); + return NULL; + } +} + + static PyObject * _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { #if !defined(PYPY_VERSION) @@ -4553,6 +4677,8 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"_load_from_filelike", (PyCFunction)_load_from_filelike, METH_FASTCALL | METH_KEYWORDS, NULL}, + {"_populate_finfo_constants", (PyCFunction)_populate_finfo_constants, + METH_VARARGS, NULL}, /* from umath */ {"frompyfunc", (PyCFunction) ufunc_frompyfunc, @@ -4580,13 +4706,15 @@ static struct PyMethodDef array_module_methods[] = { METH_NOARGS, NULL}, {"_set_madvise_hugepage", (PyCFunction)_set_madvise_hugepage, METH_O, NULL}, + {"_blas_supports_fpe", (PyCFunction)_blas_supports_fpe, + METH_O, "Query or set BLAS FPE support: pass None, True, or False; returns the new value."}, {"_reload_guard", (PyCFunction)_reload_guard, METH_NOARGS, "Give a warning on reload and big warning in sub-interpreters."}, {"from_dlpack", (PyCFunction)from_dlpack, METH_FASTCALL | METH_KEYWORDS, NULL}, {"_unique_hash", (PyCFunction)array__unique_hash, - METH_O, "Collect unique values via a hash map."}, + METH_FASTCALL | METH_KEYWORDS, "Collect unique values via a hash map."}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -4773,36 +4901,27 @@ initialize_thread_unsafe_state(void) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - array_module_methods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__multiarray_umath(void) { - PyObject *m, *d, *s; - PyObject *c_api; +static int +_multiarray_umath_exec(PyObject *m) { + PyObject *d, *s, *c_api; - /* Create the module and add the functions */ - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Initialize CPU features */ if (npy_cpu_init() < 0) { - goto err; + return -1; } /* Initialize CPU dispatch tracer */ if (npy_cpu_dispatch_tracer_init(m) < 0) { - goto err; + return -1; } #if defined(MS_WIN64) && defined(__GNUC__) @@ -4818,62 +4937,73 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { numpy_pydatetime_import(); if (PyErr_Occurred()) { - goto err; + return -1; } /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); if (!d) { - goto err; + return -1; } if (intern_strings() < 0) { - goto err; + return -1; } if (initialize_static_globals() < 0) { - goto err; + return -1; } if (initialize_thread_unsafe_state() < 0) { - goto err; + return -1; } if (init_import_mutex() < 0) { - goto err; + return -1; } if (init_extobj() < 0) { - goto err; + return -1; } + /* Set __signature__ to None on the type (the instance has a property) */ + s = npy_import("numpy._globals", "_signature_descriptor"); + if (s == NULL) { + return -1; + } + PyUFunc_Type.tp_dict = Py_BuildValue( + "{ON}", npy_interned_str.__signature__, s); + if (PyUFunc_Type.tp_dict == NULL) { + return -1; + } if
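Usage of the private toggle added above (the module path is NumPy's internal extension module):

    from numpy._core import _multiarray_umath as mu
    mu._blas_supports_fpe(None)   # query: returns the current setting
    mu._blas_supports_fpe(False)  # set: returns the new value
    mu._blas_supports_fpe("yes")  # TypeError: BLAS FPE support must be None, True, or False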
(PyType_Ready(&PyUFunc_Type) < 0) { - goto err; + Py_CLEAR(PyUFunc_Type.tp_dict); + return -1; } PyArrayDTypeMeta_Type.tp_base = &PyType_Type; if (PyType_Ready(&PyArrayDTypeMeta_Type) < 0) { - goto err; + return -1; } PyArrayDescr_Type.tp_hash = PyArray_DescrHash; Py_SET_TYPE(&PyArrayDescr_Type, &PyArrayDTypeMeta_Type); if (PyType_Ready(&PyArrayDescr_Type) < 0) { - goto err; + return -1; } initialize_casting_tables(); initialize_numeric_types(); if (initscalarmath(m) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArray_Type) < 0) { - goto err; + return -1; } if (setup_scalartypes(d) < 0) { - goto err; + return -1; } PyArrayIter_Type.tp_iter = PyObject_SelfIter; @@ -4881,28 +5011,28 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_free = PyArray_free; if (PyType_Ready(&PyArrayIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMapIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { - goto err; + return -1; } PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&NpyIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFlags_Type) < 0) { - goto err; + return -1; } NpyBusDayCalendar_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) { - goto err; + return -1; } /* @@ -4923,43 +5053,43 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { s = npy_cpu_features_dict(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_features__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_baseline_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_baseline__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_dispatch_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_dispatch__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = PyCapsule_New((void *)_datetime_strings, NULL, NULL); if (s == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "DATETIMEUNITS", s); Py_DECREF(s); #define ADDCONST(NAME) \ - s = PyLong_FromLong(NPY_##NAME); \ + s = PyLong_FromLong(NPY_##NAME); \ PyDict_SetItemString(d, #NAME, s); \ Py_DECREF(s) @@ -4999,39 +5129,39 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* Finalize scalar types and expose them via namespace or typeinfo dict */ if (set_typeinfo(d) != 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFunctionDispatcher_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_ArrayFunctionDispatcher", (PyObject *)&PyArrayFunctionDispatcher_Type); if (PyType_Ready(&PyArrayArrayConverter_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_array_converter", (PyObject *)&PyArrayArrayConverter_Type); if (PyType_Ready(&PyArrayMethod_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyBoundArrayMethod_Type) < 0) { - goto err; + return -1; } if (initialize_and_map_pytypes_to_dtypes() < 0) { - goto err; + return -1; } if (PyArray_InitializeCasts() < 0) { - goto err; + return -1; } if (init_string_dtype() < 0) { - goto err; + return -1; } /* @@ -5040,7 +5170,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyDataMem_DefaultHandler = PyCapsule_New( &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); if (PyDataMem_DefaultHandler == NULL) { - goto err; + return -1; } /* @@ 
-5049,32 +5179,32 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { */ current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); if (current_handler == NULL) { - goto err; + return -1; } if (initumath(m) != 0) { - goto err; + return -1; } if (set_matmul_flags(d) < 0) { - goto err; + return -1; } // initialize static references to ndarray.__array_*__ special methods npy_static_pydata.ndarray_array_finalize = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_finalize__"); if (npy_static_pydata.ndarray_array_finalize == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_ufunc = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_ufunc__"); if (npy_static_pydata.ndarray_array_ufunc == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_function = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_function__"); if (npy_static_pydata.ndarray_array_function == NULL) { - goto err; + return -1; } /* @@ -5090,13 +5220,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { if (npy_cache_import_runtime( "numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { - goto err; + return -1; } if (PyObject_CallFunction( npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "StringDType", (PyObject *)&PyArray_StringDType); @@ -5104,13 +5234,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( 0, NULL, NPY_DEFAULT_INT, NPY_FALSE); if (npy_static_pydata.zero_pyint_like_arr == NULL) { - goto err; + return -1; } ((PyArrayObject_fields *)npy_static_pydata.zero_pyint_like_arr)->flags |= (NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); if (verify_static_structs_initialized() < 0) { - goto err; + return -1; } /* @@ -5120,33 +5250,44 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* The dtype API is not auto-filled/generated via Python scripts: */ _fill_dtype_api(PyArray_API); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_UFUNC_API", c_api); Py_DECREF(c_api); if (PyErr_Occurred()) { - goto err; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_umath_slots[] = { + {Py_mod_exec, _multiarray_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; - return m; +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_umath", + .m_size = 0, + .m_methods = array_module_methods, + .m_slots = _multiarray_umath_slots, +}; - err: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load multiarray module."); - } - Py_DECREF(m); - return NULL; +PyMODINIT_FUNC PyInit__multiarray_umath(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index 3ed5cf1a0245..ffe37e80c9be 100644 --- 
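With the move to multi-phase initialization, re-executing the module trips the `module_loaded` guard; roughly:

    import importlib
    import numpy._core._multiarray_umath as mu
    importlib.reload(mu)  # ImportError: cannot load module more than once per process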
a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -2077,8 +2077,14 @@ npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize) npy_intp *strides = NAD_STRIDES(axisdata); for (int iop = 0; iop < nop; iop++) { - /* Check that we set things up nicely (if shape is ever 1) */ - assert((axisdata->shape == 1) ? (prev_strides[iop] == strides[iop]) : 1); + /* + * Check that we set things up nicely so strides coalesce. Except + * for index operands, which currently disrupt coalescing. + * NOTE(seberg): presumably `npyiter_compute_index_strides` should + * not set the strides to 0, but this was safer for backporting. + */ + assert((axisdata->shape != 1) || (prev_strides[iop] == strides[iop]) + || (op_itflags[iop] & (NPY_ITER_C_INDEX|NPY_ITER_F_INDEX))); if (op_single_stride_dims[iop] == idim) { /* Best case: the strides still collapse for this operand. */ @@ -2120,11 +2126,11 @@ npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize) break; /* Avoid a zero coresize. */ } - double bufsize = size; - if (bufsize > maximum_size && + double bufsize = (double)size; + if (size > maximum_size && (cost > 1 || !(itflags & NPY_ITFLAG_GROWINNER))) { /* If we need buffering, limit size in cost calculation. */ - bufsize = maximum_size; + bufsize = (double)maximum_size; } NPY_IT_DBG_PRINT(" dim=%d, n_buffered=%d, cost=%g @bufsize=%g (prev scaled cost=%g)\n", diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 27c392db8720..992bc013af3a 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -20,6 +20,7 @@ #include "common.h" #include "conversion_utils.h" #include "ctors.h" +#include "npy_pycompat.h" /* Functions not part of the public NumPy C API */ npy_bool npyiter_has_writeback(NpyIter *iter); @@ -588,7 +589,7 @@ npyiter_prepare_ops(PyObject *op_in, PyObject **out_owner, PyObject ***out_objs) { /* Take ownership of op_in (either a tuple/list or single element): */ if (PyTuple_Check(op_in) || PyList_Check(op_in)) { - PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); + PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref OK if (seq == NULL) { Py_DECREF(op_in); return -1; @@ -719,35 +720,62 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) /* Need nop to set up workspaces */ PyObject **op_objs = NULL; - PyObject *op_in_owned = NULL; /* Sequence/object owning op_objs. */ - int nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); + PyObject *op_in_owned = NULL; /* Sequence/object owning op_objs.
*/ + PyArray_Descr **op_request_dtypes = NULL; + int pre_alloc_fail = 0; + int post_alloc_fail = 0; + int nop; + NPY_DEFINE_WORKSPACE(op, PyArrayObject *, 2 * 8); + NPY_DEFINE_WORKSPACE(op_flags, npy_uint32, 8); + NPY_DEFINE_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS); + NPY_DEFINE_WORKSPACE(op_axes, int *, 8); + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(op_in); + + nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); if (nop < 0) { - goto pre_alloc_fail; + pre_alloc_fail = 1; + goto cleanup; } /* allocate workspace for Python objects (operands and dtypes) */ - NPY_ALLOC_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); + NPY_INIT_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); if (op == NULL) { - goto pre_alloc_fail; + pre_alloc_fail = 1; + goto cleanup; } memset(op, 0, sizeof(PyObject *) * 2 * nop); - PyArray_Descr **op_request_dtypes = (PyArray_Descr **)(op + nop); + op_request_dtypes = (PyArray_Descr **)(op + nop); /* And other workspaces (that do not need to clean up their content) */ - NPY_ALLOC_WORKSPACE(op_flags, npy_uint32, 8, nop); - NPY_ALLOC_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); - NPY_ALLOC_WORKSPACE(op_axes, int *, 8, nop); + NPY_INIT_WORKSPACE(op_flags, npy_uint32, 8, nop); + NPY_INIT_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); + NPY_INIT_WORKSPACE(op_axes, int *, 8, nop); /* * Trying to allocate should be OK if one failed, check for error now * that we can use `goto finish` to clean up everything. * (NPY_ALLOC_WORKSPACE has to be done before a goto fail currently.) */ if (op_flags == NULL || op_axes_storage == NULL || op_axes == NULL) { - goto finish; + post_alloc_fail = 1; + goto cleanup; } /* op and op_flags */ if (npyiter_convert_ops(nop, op_objs, op_flags_in, op, op_flags) != 1) { + post_alloc_fail = 1; + goto cleanup; + } + +cleanup:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + + if (pre_alloc_fail) { + goto pre_alloc_fail; + } + + if (post_alloc_fail) { goto finish; } @@ -888,7 +916,7 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), } if (!PyTuple_Check(item) && !PyList_Check(item)) { PyErr_SetString(PyExc_ValueError, - "Each item in axes must be a an integer tuple"); + "Each item in axes must be an integer tuple"); Py_DECREF(item); return NULL; } @@ -1395,7 +1423,10 @@ npyiter_enable_external_loop( return NULL; } - NpyIter_EnableExternalLoop(self->iter); + if (NpyIter_EnableExternalLoop(self->iter) != NPY_SUCCEED) { + return NULL; + } + /* EnableExternalLoop invalidates cached values */ if (npyiter_cache_values(self) < 0) { return NULL; diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 62e1fd3c1b15..997c798c665d 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -38,6 +38,7 @@ intern_strings(void) INTERN_STRING(array_ufunc, "__array_ufunc__"); INTERN_STRING(array_wrap, "__array_wrap__"); INTERN_STRING(array_finalize, "__array_finalize__"); + INTERN_STRING(numpy_dtype, "__numpy_dtype__"); INTERN_STRING(implementation, "_implementation"); INTERN_STRING(axis1, "axis1"); INTERN_STRING(axis2, "axis2"); @@ -64,6 +65,10 @@ intern_strings(void) INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); INTERN_STRING(legacy, "legacy"); INTERN_STRING(__doc__, "__doc__"); + INTERN_STRING(__signature__, "__signature__"); + INTERN_STRING(copy, "copy"); + INTERN_STRING(dl_device, "dl_device"); + INTERN_STRING(max_version, "max_version"); return 0; } @@ -169,7 +174,8 @@ initialize_static_globals(void) 
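The `npyiter_enable_external_loop` fix means a failing `NpyIter_EnableExternalLoop` now surfaces as a Python exception instead of being ignored; for example (assuming the usual restriction that a tracked index is incompatible with an external loop):

    it = np.nditer(np.arange(6).reshape(2, 3), flags=['c_index'])
    it.enable_external_loop()  # now raises instead of leaving the iterator
                               # in an inconsistent state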
return -1; } - npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); + npy_static_pydata.kwnames_is_copy = + Py_BuildValue("(O)", npy_interned_str.copy); if (npy_static_pydata.kwnames_is_copy == NULL) { return -1; } @@ -185,7 +191,9 @@ initialize_static_globals(void) } npy_static_pydata.dl_call_kwnames = - Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); + Py_BuildValue("(OOO)", npy_interned_str.dl_device, + npy_interned_str.copy, + npy_interned_str.max_version); if (npy_static_pydata.dl_call_kwnames == NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 287dc80e4c1f..f3d1135ec044 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -24,6 +24,7 @@ typedef struct npy_interned_str_struct { PyObject *array_wrap; PyObject *array_finalize; PyObject *array_ufunc; + PyObject *numpy_dtype; PyObject *implementation; PyObject *axis1; PyObject *axis2; @@ -43,6 +44,10 @@ typedef struct npy_interned_str_struct { PyObject *pyvals_name; PyObject *legacy; PyObject *__doc__; + PyObject *__signature__; + PyObject *copy; + PyObject *dl_device; + PyObject *max_version; } npy_interned_str_struct; /* diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index b801d7e041e2..de4012641684 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -332,6 +332,7 @@ static int fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **result) { PyObject *fastop = NULL; + if (PyLong_CheckExact(o2)) { int overflow = 0; long exp = PyLong_AsLongAndOverflow(o2, &overflow); @@ -363,7 +364,12 @@ fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **result) } PyArrayObject *a1 = (PyArrayObject *)o1; - if (!(PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1))) { + if (PyArray_ISOBJECT(a1)) { + return 1; + } + if (fastop != n_ops.square && !PyArray_ISFLOAT(a1) && !PyArray_ISCOMPLEX(a1)) { + // we special-case squaring for any array type + // gh-29388 return 1; } diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 571b50372684..ac70f38f39a5 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -184,7 +184,7 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -246,7 +246,7 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -480,7 +480,7 @@ _fill_with_none(char *optr, PyArray_Descr *dtype) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index e133b46d008a..a602e312727b 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -11,8 +11,7 @@ #include "numpy/npy_math.h" #include 
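The `fast_scalar_power` change special-cases squaring (gh-29388): `a ** 2` now takes the `n_ops.square` fast path for any non-object dtype, where previously only float and complex arrays qualified:

    a = np.arange(5, dtype=np.int64)
    a ** 2  # dispatched to np.square; object arrays still take the general route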
"npy_config.h" - - +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "array_coercion.h" #include "ctors.h" @@ -343,17 +342,34 @@ PyArray_DescrFromTypeObject(PyObject *type) /* Do special thing for VOID sub-types */ if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) { + PyObject *attr; + _PyArray_LegacyDescr *conv = NULL; + int res = PyObject_GetOptionalAttr(type, npy_interned_str.dtype, &attr); + if (res < 0) { + return NULL; // Should be a rather criticial error, so just fail. + } + if (res == 1) { + if (!PyArray_DescrCheck(attr)) { + if (PyObject_HasAttrString(attr, "__get__")) { + /* If the object has a __get__, assume this is a class property. */ + Py_DECREF(attr); + conv = NULL; + } + else { + PyErr_Format(PyExc_ValueError, + "`.dtype` attribute %R is not a valid dtype instance", + attr); + Py_DECREF(attr); + return NULL; + } + } + } + _PyArray_LegacyDescr *new = (_PyArray_LegacyDescr *)PyArray_DescrNewFromType(NPY_VOID); if (new == NULL) { return NULL; } - _PyArray_LegacyDescr *conv = (_PyArray_LegacyDescr *)( - _arraydescr_try_convert_from_dtype_attr(type)); - if (conv == NULL) { - Py_DECREF(new); - return NULL; - } - if ((PyObject *)conv != Py_NotImplemented && PyDataType_ISLEGACY(conv)) { + if (conv != NULL && PyDataType_ISLEGACY(conv)) { new->fields = conv->fields; Py_XINCREF(new->fields); new->names = conv->names; @@ -362,7 +378,7 @@ PyArray_DescrFromTypeObject(PyObject *type) new->subarray = conv->subarray; conv->subarray = NULL; } - Py_DECREF(conv); + Py_XDECREF(conv); Py_XDECREF(new->typeobj); new->typeobj = (PyTypeObject *)type; Py_INCREF(type); @@ -519,15 +535,10 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) if (buff == NULL) { return PyErr_NoMemory(); } - /* copyswap needs an array object, but only actually cares about the - * dtype - */ - PyArrayObject_fields dummy_arr; - if (base == NULL) { - dummy_arr.descr = descr; - base = (PyObject *)&dummy_arr; + memcpy(buff, data, itemsize); + if (swap) { + byte_swap_vector(buff, itemsize / 4, 4); } - copyswap(buff, data, swap, base); /* truncation occurs here */ PyObject *u = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, buff, itemsize / 4); diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 03165b10337e..2d63dd6e3602 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -160,7 +160,7 @@ gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, * valid self (a generic scalar) and an other item. * May fill self_item and/or other_arr (but not both) with non-NULL values. * - * Why this dance? When the other object is a exactly Python scalar something + * Why this dance? When the other object is exactly a Python scalar something * awkward happens historically in NumPy. * NumPy doesn't define a result, but the ufunc would cast to `astype(object)` * which is the same as `scalar.item()`. And that operation converts e.g. 
@@ -226,7 +226,7 @@ find_binary_operation_path( */ int was_scalar; PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny_int( - other, NULL, NULL, 0, 0, 0, NULL, &was_scalar); + other, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &was_scalar); if (arr == NULL) { return -1; } @@ -939,12 +939,23 @@ datetimetype_repr(PyObject *self) if (legacy_print_mode == -1) { return NULL; } - if (legacy_print_mode > 125) { - ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); - } - else { - ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + + PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); + if((scal->obval == NPY_DATETIME_NAT) && (meta != NULL)){ + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s','%S')", iso, meta); + } else { + ret = PyUnicode_FromFormat("numpy.datetime64('%s','%S')", iso, meta); + } + } else { + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); + } + else { + ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + } } + } else { PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); @@ -1929,33 +1940,6 @@ gentype_transpose_get(PyObject *self, void *NPY_UNUSED(ignored)) return self; } -static PyObject * -gentype_newbyteorder(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`newbyteorder` was removed from scalar types in NumPy 2.0. " - "Use `sc.view(sc.dtype.newbyteorder(order))` instead."); - return NULL; -} - -static PyObject * -gentype_itemset(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`itemset` was removed from scalar types in NumPy 2.0 " - "because scalars are immutable."); - return NULL; -} - -static PyObject * -gentype_ptp(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`ptp` was removed from scalar types in NumPy 2.0. " - "For a scalar, the range of values always equals 0."); - return NULL; -} - static PyGetSetDef gentype_getsets[] = { {"ndim", @@ -2000,15 +1984,6 @@ static PyGetSetDef gentype_getsets[] = { {"T", (getter)gentype_transpose_get, (setter)0, NULL, NULL}, - {"newbyteorder", - (getter)gentype_newbyteorder, - (setter)0, NULL, NULL}, - {"itemset", - (getter)gentype_itemset, - (setter)0, NULL, NULL}, - {"ptp", - (getter)gentype_ptp, - (setter)0, NULL, NULL}, {"device", (getter)array_device, (setter)0, NULL, NULL}, @@ -2082,12 +2057,11 @@ gentype_wraparray(PyObject *NPY_UNUSED(scalar), PyObject *args) /* * These gentype_* functions do not take keyword arguments. - * The proper flag is METH_VARARGS. + * The proper flag is METH_VARARGS or METH_NOARGS. 
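Effect of the `datetimetype_repr` change: NaT values now print their unit instead of silently dropping the metadata:

    repr(np.datetime64('NaT', 'ns'))   # "np.datetime64('NaT','ns')"
    repr(np.datetime64('2024-01-01'))  # unchanged: "np.datetime64('2024-01-01')"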
*/ /**begin repeat * - * #name = tolist, item, __deepcopy__, __copy__, - * swapaxes, conj, conjugate, nonzero, + * #name = tolist, item, swapaxes, conj, conjugate, nonzero, * fill, transpose# */ static PyObject * @@ -2097,6 +2071,34 @@ gentype_@name@(PyObject *self, PyObject *args) } /**end repeat**/ +static PyObject * +gentype___copy__(PyObject *self) +{ + // scalars are immutable, so we can return a new reference + // the only exceptions are scalars with void dtype + if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type)) { + // path via array + return gentype_generic_method(self, NULL, NULL, "__copy__"); + } + return Py_NewRef(self); +} + +static PyObject * +gentype___deepcopy__(PyObject *self, PyObject *args) +{ + // note: the signature may need updating, as __deepcopy__ can accept the keyword `memo` + + // scalars are immutable, so we can return a new reference + // the only exceptions are scalars with void dtype + // if the number of arguments is not 1, we let gentype_generic_method do the + // error handling + if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type) || (PyTuple_Size(args) != 1)) { + // path via array + return gentype_generic_method(self, args, NULL, "__deepcopy__"); + } + return Py_NewRef(self); +} + static PyObject * gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds) { @@ -2147,7 +2149,7 @@ gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds) * #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, * std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, * round, argmax, argmin, max, min, any, all, astype, resize, - * reshape, choose, tostring, tobytes, copy, searchsorted, view, + * reshape, choose, tobytes, copy, searchsorted, view, * flatten, ravel, squeeze# */ static PyObject * @@ -2605,9 +2607,6 @@ static PyMethodDef gentype_methods[] = { {"tofile", (PyCFunction)gentype_tofile, METH_VARARGS | METH_KEYWORDS, NULL}, - {"tostring", - (PyCFunction)gentype_tostring, - METH_VARARGS | METH_KEYWORDS, NULL}, {"byteswap", (PyCFunction)gentype_byteswap, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2641,7 +2640,7 @@ static PyMethodDef gentype_methods[] = { /* for the copy module */ {"__copy__", (PyCFunction)gentype___copy__, - METH_VARARGS, NULL}, + METH_NOARGS, NULL}, {"__deepcopy__", (PyCFunction)gentype___deepcopy__, METH_VARARGS, NULL}, @@ -2842,6 +2841,16 @@ static PyMethodDef numbertype_methods[] = { {NULL, NULL, 0, NULL} /* sentinel */ }; +/**begin repeat + * #name = boolean,datetime# + */ +static PyMethodDef @name@type_methods[] = { + /* for typing */ + {"__class_getitem__", Py_GenericAlias, METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ +}; +/**end repeat**/ + /**begin repeat * #name = cfloat,clongdouble# */ @@ -3900,7 +3909,6 @@ static npy_hash_t @lname@_arrtype_hash(PyObject *obj) { PyArray_DatetimeMetaData *meta; - PyArray_Descr *dtype; npy_@lname@ val = PyArrayScalar_VAL(obj, @name@); if (val == NPY_DATETIME_NAT) { @@ -3908,10 +3916,10 @@ static npy_hash_t return PyBaseObject_Type.tp_hash(obj); } - dtype = PyArray_DescrFromScalar(obj); - meta = get_datetime_metadata_from_dtype(dtype); + meta = &((PyDatetimeScalarObject *)obj)->obmeta; - return @lname@_hash(meta, val); + npy_hash_t res = @lname@_hash(meta, val); + return res; } /**end repeat**/ @@ -4548,6 +4556,7 @@ initialize_numeric_types(void) /**end repeat**/ + PyDatetimeArrType_Type.tp_methods = datetimetype_methods; /**begin repeat * #Type = Byte, UByte, Short, UShort, Int, UInt, Long, @@ -4561,6 +4570,7 @@
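A behavioral sketch of the scalar method changes above:

    import copy
    x = np.float64(1.5)
    copy.copy(x) is x      # True: immutable scalars return themselves
    copy.deepcopy(x) is x  # True as well; void scalars still go through an array
    x.tostring()           # AttributeError: the long-deprecated alias is removed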
initialize_numeric_types(void) PyBoolArrType_Type.tp_str = genbool_type_str; PyBoolArrType_Type.tp_repr = genbool_type_repr; + PyBoolArrType_Type.tp_methods = booleantype_methods; /**begin repeat diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 340fe7289ac8..fce61ef36e63 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -26,20 +26,12 @@ static int _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order); -/*NUMPY_API - * Resize (reallocate data). Only works if nothing else is referencing this - * array and it is contiguous. If refcheck is 0, then the reference count is - * not checked and assumed to be 1. You still must own this data and have no - * weak-references and no base object. - */ -NPY_NO_EXPORT PyObject * -PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, - NPY_ORDER NPY_UNUSED(order)) +NPY_NO_EXPORT int +PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) { npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; int new_nd=newshape->len, k, elsize; - int refcnt; npy_intp* new_dimensions=newshape->ptr; npy_intp new_strides[NPY_MAXDIMS]; npy_intp *dimptr; @@ -48,7 +40,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (!PyArray_ISONESEGMENT(self)) { PyErr_SetString(PyExc_ValueError, "resize only works on single-segment arrays"); - return NULL; + return -1; } /* Compute total size of old and new arrays. The new size might overflow */ @@ -62,10 +54,11 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (new_dimensions[k] < 0) { PyErr_SetString(PyExc_ValueError, "negative dimensions not allowed"); - return NULL; + return -1; } if (npy_mul_sizes_with_overflow(&newsize, newsize, new_dimensions[k])) { - return PyErr_NoMemory(); + PyErr_NoMemory(); + return -1; } } @@ -73,14 +66,15 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, elsize = PyArray_ITEMSIZE(self); oldnbytes = oldsize * elsize; if (npy_mul_sizes_with_overflow(&newnbytes, newsize, elsize)) { - return PyErr_NoMemory(); + PyErr_NoMemory(); + return -1; } if (oldnbytes != newnbytes) { if (!(PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) { PyErr_SetString(PyExc_ValueError, "cannot resize this array: it does not own its data"); - return NULL; + return -1; } if (PyArray_BASE(self) != NULL @@ -89,58 +83,67 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, "cannot resize an array that " "references or is referenced\n" "by another array in this way. 
Use the np.resize function."); - return NULL; + return -1; } if (refcheck) { #ifdef PYPY_VERSION PyErr_SetString(PyExc_ValueError, "cannot resize an array with refcheck=True on PyPy.\n" "Use the np.resize function or refcheck=False"); - return NULL; + return -1; +#else +#if PY_VERSION_HEX >= 0x030E00B0 + if (!PyUnstable_Object_IsUniquelyReferenced((PyObject *)self)) { #else - refcnt = Py_REFCNT(self); + if (Py_REFCNT(self) > 2) { +#endif + PyErr_SetString( + PyExc_ValueError, + "cannot resize an array that " + "references or is referenced\n" + "by another array in this way.\n" + "Use the np.resize function or refcheck=False"); + return -1; + } #endif /* PYPY_VERSION */ } - else { - refcnt = 1; - } - if (refcnt > 2) { - PyErr_SetString(PyExc_ValueError, - "cannot resize an array that " - "references or is referenced\n" - "by another array in this way.\n" - "Use the np.resize function or refcheck=False"); - return NULL; - } - /* Reallocate space if needed - allocating 0 is forbidden */ PyObject *handler = PyArray_HANDLER(self); if (handler == NULL) { /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ PyErr_SetString(PyExc_RuntimeError, "no memory handler found but OWNDATA flag set"); - return NULL; + return -1; } + if (newnbytes < oldnbytes) { + /* Clear now removed data (if dtype has references) */ + if (PyArray_ClearBuffer( + PyArray_DESCR(self), PyArray_BYTES(self) + newnbytes, + elsize, oldsize-newsize, PyArray_ISALIGNED(self)) < 0) { + return -1; + } + } + new_data = PyDataMem_UserRENEW(PyArray_DATA(self), - newnbytes == 0 ? elsize : newnbytes, + newnbytes == 0 ? 1 : newnbytes, handler); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); - return NULL; + return -1; } ((PyArrayObject_fields *)self)->data = new_data; - } - if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { - /* Fill new memory with zeros (PyLong zero for object arrays) */ - npy_intp stride = elsize; - npy_intp size = newsize - oldsize; - char *data = PyArray_BYTES(self) + oldnbytes; - int aligned = PyArray_ISALIGNED(self); - if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, - stride, size, aligned) < 0) { - return NULL; + if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { + /* Fill new memory with zeros (PyLong zero for object arrays) */ + npy_intp stride = elsize; + npy_intp size = newsize - oldsize; + char *data = PyArray_BYTES(self) + oldnbytes; + int aligned = PyArray_ISALIGNED(self); + if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, + stride, size, aligned) < 0) { + return -1; + } } } @@ -153,7 +156,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (dimptr == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); - return NULL; + return -1; } ((PyArrayObject_fields *)self)->dimensions = dimptr; ((PyArrayObject_fields *)self)->strides = dimptr + new_nd; @@ -166,11 +169,27 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, memmove(PyArray_STRIDES(self), new_strides, new_nd*sizeof(npy_intp)); } else { - PyDimMem_FREE(((PyArrayObject_fields *)self)->dimensions); + npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = 0; ((PyArrayObject_fields *)self)->dimensions = NULL; ((PyArrayObject_fields *)self)->strides = NULL; } + return 0; +} + +/*NUMPY_API + * Resize (reallocate data). Only works if nothing else is referencing this + * array and it is contiguous. 
If refcheck is 0, then the reference count is + * not checked and assumed to be 1. You still must own this data and have no + * weak-references and no base object. + */ +NPY_NO_EXPORT PyObject * +PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, + NPY_ORDER NPY_UNUSED(order)) +{ + if (PyArray_Resize_int(self, newshape, refcheck) < 0) { + return NULL; + } Py_RETURN_NONE; } diff --git a/numpy/_core/src/multiarray/shape.h b/numpy/_core/src/multiarray/shape.h index a9b91feb0b4a..5e87116f08df 100644 --- a/numpy/_core/src/multiarray/shape.h +++ b/numpy/_core/src/multiarray/shape.h @@ -3,6 +3,12 @@ #include "conversion_utils.h" +/* + * Internal version of PyArray_Resize that returns -1 on error, 0 otherwise. + */ +NPY_NO_EXPORT int +PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck); + /* * Creates a sorted stride perm matching the KEEPORDER behavior * of the NpyIter object. Because this operates based on multiple diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp index f66727501f97..20ef3013bf86 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.cpp +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -18,6 +18,7 @@ #include "numpyos.h" #include "umathmodule.h" #include "gil_utils.h" +#include "raii_utils.hpp" #include "static_string.h" #include "dtypemeta.h" #include "dtype.h" @@ -605,7 +606,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul const npy_packed_static_string *ps = (npy_packed_static_string *)in; int isnull = NpyString_load(allocator, ps, string_to_load); if (isnull == -1) { - const char *msg = "Failed to load string for conversion to a non-nullable type"; + const char msg[] = "Failed to load string for conversion to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_MemoryError, msg); @@ -617,7 +618,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul } else if (isnull) { if (has_null) { - const char *msg = "Arrays with missing data cannot be converted to a non-nullable type"; + const char msg[] = "Arrays with missing data cannot be converted to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_ValueError, msg); @@ -821,8 +822,8 @@ static PyType_Slot s2int_slots[] = { static const char * make_s2type_name(NPY_TYPES typenum) { - const char *prefix = "cast_StringDType_to_"; - size_t plen = strlen(prefix); + const char prefix[] = "cast_StringDType_to_"; + size_t plen = sizeof(prefix)/sizeof(char) - 1; const char *type_name = typenum_to_cstr(typenum); size_t nlen = strlen(type_name); @@ -833,31 +834,36 @@ make_s2type_name(NPY_TYPES typenum) { return NULL; } - // memcpy instead of strcpy to avoid stringop-truncation warning, since - // we are not including the trailing null character - memcpy(buf, prefix, plen); - strncat(buf, type_name, nlen); + // memcpy instead of strcpy/strncat to avoid stringop-truncation warning, + // since we are not including the trailing null character + char *p = buf; + memcpy(p, prefix, plen); + p += plen; + memcpy(p, type_name, nlen); return buf; } static const char * make_type2s_name(NPY_TYPES typenum) { - const char *prefix = "cast_"; - size_t plen = strlen(prefix); + const char prefix[] = "cast_"; + size_t plen = sizeof(prefix)/sizeof(char) - 1; const char *type_name = typenum_to_cstr(typenum); size_t nlen = strlen(type_name); - const char *suffix = "_to_StringDType"; - size_t slen = strlen(suffix); + const char suffix[] = "_to_StringDType"; + 
size_t slen = sizeof(suffix)/sizeof(char) - 1;

     char *buf = (char *)PyMem_RawCalloc(sizeof(char), plen + nlen + slen + 1);

-    // memcpy instead of strcpy to avoid stringop-truncation warning, since
-    // we are not including the trailing null character
-    memcpy(buf, prefix, plen);
-    strncat(buf, type_name, nlen);
-    strncat(buf, suffix, slen);
+    // memcpy instead of strcpy/strncat to avoid stringop-truncation warning,
+    // since we are not including the trailing null character
+    char *p = buf;
+    memcpy(p, prefix, plen);
+    p += plen;
+    memcpy(p, type_name, nlen);
+    p += nlen;
+    memcpy(p, suffix, slen);

     return buf;
 }

@@ -1905,7 +1911,8 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[],
                 NpyAuxData *NPY_UNUSED(auxdata))
 {
     PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)context->descriptors[0];
-    npy_string_allocator *allocator = NpyString_acquire_allocator(descr);
+    np::raii::NpyStringAcquireAllocator alloc(descr);
+
     int has_null = descr->na_object != NULL;
     int has_string_na = descr->has_string_na;
     const npy_static_string *default_string = &descr->default_string;
@@ -1921,22 +1928,22 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[],
         const npy_packed_static_string *ps = (npy_packed_static_string *)in;
         npy_static_string s = {0, NULL};
         if (load_nullable_string(ps, &s, has_null, has_string_na,
-                                 default_string, na_name, allocator,
+                                 default_string, na_name, alloc.allocator(),
                                  "in string to bytes cast") == -1) {
-            goto fail;
+            return -1;
         }

         for (size_t i=0; i<s.size; i++) {
             if (((unsigned char *)s.buf)[i] > 127) {
-                NPY_ALLOW_C_API_DEF;
-                NPY_ALLOW_C_API;
+                np::raii::EnsureGIL ensure_gil{};
+
                 PyObject *str = PyUnicode_FromStringAndSize(s.buf, s.size);
                 if (str == NULL) {
                     PyErr_SetString(
                             PyExc_UnicodeEncodeError,
                             "Invalid character encountered during unicode encoding."
                     );
-                    goto fail;
+                    return -1;
                 }

                 PyObject *exc = PyObject_CallFunction(
@@ -1951,14 +1958,13 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[],

                 if (exc == NULL) {
                     Py_DECREF(str);
-                    goto fail;
+                    return -1;
                 }

                 PyErr_SetObject(PyExceptionInstance_Class(exc), exc);
                 Py_DECREF(exc);
                 Py_DECREF(str);
-                NPY_DISABLE_C_API;
-                goto fail;
+                return -1;
             }
         }

@@ -1971,15 +1977,7 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[],
         out += out_stride;
     }

-    NpyString_release_allocator(allocator);
-    return 0;
-
-fail:
-
-    NpyString_release_allocator(allocator);
-
-    return -1;
+    return 0;
 }

 static PyType_Slot s2bytes_slots[] = {
diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c
index a06e7a1ed1b6..7abf4b9303af 100644
--- a/numpy/_core/src/multiarray/stringdtype/dtype.c
+++ b/numpy/_core/src/multiarray/stringdtype/dtype.c
@@ -18,6 +18,7 @@
 #include "conversion_utils.h"
 #include "npy_import.h"
 #include "multiarraymodule.h"
+#include "npy_sort.h"

 /*
  * Internal helper to create new instances
@@ -398,14 +399,7 @@ stringdtype_getitem(PyArray_StringDTypeObject *descr, char **dataptr)
         }
     }
     else {
-#ifndef PYPY_VERSION
         val_obj = PyUnicode_FromStringAndSize(sdata.buf, sdata.size);
-#else
-        // work around pypy issue #4046, can delete this when the fix is in
-        // a released version of pypy
-        val_obj = PyUnicode_FromStringAndSize(
-                sdata.buf == NULL ? "" : sdata.buf, sdata.size);
-#endif
         if (val_obj == NULL) {
             goto fail;
         }
@@ -459,6 +453,7 @@ compare(void *a, void *b, void *arr)
     return ret;
 }

+// We assume the allocator mutex is already held.
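/*
 * Hedged usage sketch (ours, not from the patch) of the locking discipline
 * implied by the comment above: every caller brackets _compare with an
 * acquire/release of the string allocator, along the lines of
 *
 *     npy_string_allocator *allocator = NpyString_acquire_allocator(descr);
 *     int cmp = _compare(a, b, descr, descr);
 *     NpyString_release_allocator(allocator);
 *
 * _compare itself never touches the mutex, which is what makes it safe to
 * reuse from the sort loops below, where the allocator is acquired once per
 * loop rather than once per comparison.
 */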
int _compare(void *a, void *b, PyArray_StringDTypeObject *descr_a, PyArray_StringDTypeObject *descr_b) @@ -516,6 +511,17 @@ _compare(void *a, void *b, PyArray_StringDTypeObject *descr_a, return NpyString_cmp(&s_a, &s_b); } +int +_sort_compare(const void *a, const void *b, void *context) +{ + PyArrayMethod_Context *sort_context = (PyArrayMethod_Context *)context; + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)sort_context->descriptors[0]; + + int ret = _compare((void *)a, (void *)b, sdescr, sdescr); + return ret; +} + // PyArray_ArgFunc // The max element is the one with the highest unicode code point. int @@ -667,6 +673,111 @@ static PyType_Slot PyArray_StringDType_Slots[] = { {0, NULL}}; +/* + * Wrap the sort loop to acquire/release the string allocator, + * and pick the correct internal implementation. + */ +static int +stringdtype_wrap_sort_loop( + PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + + npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); + int ret = npy_default_sort_loop(context, data, dimensions, strides, transferdata); + NpyString_release_allocator(allocator); + return ret; +} + +/* + * This is currently required even though the default implementation would work, + * because the output, though enforced to be equal to the input, is parametric. + */ +static NPY_CASTING +stringdtype_sort_resolve_descriptors( + PyArrayMethodObject *method, + PyArray_DTypeMeta *const *dtypes, + PyArray_Descr *const *input_descrs, + PyArray_Descr **output_descrs, + npy_intp *view_offset) +{ + output_descrs[0] = NPY_DT_CALL_ensure_canonical(input_descrs[0]); + if (NPY_UNLIKELY(output_descrs[0] == NULL)) { + return -1; + } + output_descrs[1] = NPY_DT_CALL_ensure_canonical(input_descrs[1]); + if (NPY_UNLIKELY(output_descrs[1] == NULL)) { + Py_XDECREF(output_descrs[0]); + return -1; + } + + return method->casting; +} + +static int +stringdtype_wrap_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + + npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); + int ret = npy_default_argsort_loop(context, data, dimensions, strides, transferdata); + NpyString_release_allocator(allocator); + return ret; +} + +static int +stringdtype_get_sort_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + *flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if ((parameters->flags == NPY_SORT_STABLE) + || parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)stringdtype_wrap_sort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + +static int +stringdtype_get_argsort_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters 
*)context->parameters; + *flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if (parameters->flags == NPY_SORT_STABLE + || parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)stringdtype_wrap_argsort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + static PyObject * stringdtype_new(PyTypeObject *NPY_UNUSED(cls), PyObject *args, PyObject *kwds) { @@ -831,6 +942,62 @@ PyArray_DTypeMeta PyArray_StringDType = { /* rest, filled in during DTypeMeta initialization */ }; +NPY_NO_EXPORT int +init_stringdtype_sorts(void) +{ + PyArray_DTypeMeta *stringdtype = &PyArray_StringDType; + + PyArray_DTypeMeta *sort_dtypes[2] = {stringdtype, stringdtype}; + PyType_Slot sort_slots[4] = { + {NPY_METH_resolve_descriptors, &stringdtype_sort_resolve_descriptors}, + {NPY_METH_get_loop, &stringdtype_get_sort_loop}, + {_NPY_METH_static_data, &_sort_compare}, + {0, NULL} + }; + PyArrayMethod_Spec sort_spec = { + .name = "stringdtype_sort", + .nin = 1, + .nout = 1, + .dtypes = sort_dtypes, + .slots = sort_slots, + .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS, + }; + + PyBoundArrayMethodObject *sort_method = PyArrayMethod_FromSpec_int( + &sort_spec, 1); + if (sort_method == NULL) { + return -1; + } + NPY_DT_SLOTS(stringdtype)->sort_meth = sort_method->method; + Py_INCREF(sort_method->method); + Py_DECREF(sort_method); + + PyArray_DTypeMeta *argsort_dtypes[2] = {stringdtype, &PyArray_IntpDType}; + PyType_Slot argsort_slots[3] = { + {NPY_METH_get_loop, &stringdtype_get_argsort_loop}, + {_NPY_METH_static_data, &_sort_compare}, + {0, NULL} + }; + PyArrayMethod_Spec argsort_spec = { + .name = "stringdtype_argsort", + .nin = 1, + .nout = 1, + .dtypes = argsort_dtypes, + .slots = argsort_slots, + .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS, + }; + + PyBoundArrayMethodObject *argsort_method = PyArrayMethod_FromSpec_int( + &argsort_spec, 1); + if (argsort_method == NULL) { + return -1; + } + NPY_DT_SLOTS(stringdtype)->argsort_meth = argsort_method->method; + Py_INCREF(argsort_method->method); + Py_DECREF(argsort_method); + return 0; +} + NPY_NO_EXPORT int init_string_dtype(void) { @@ -876,6 +1043,10 @@ init_string_dtype(void) PyMem_Free(PyArray_StringDType_casts); + if (init_stringdtype_sorts() < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 02ab7d246a7a..c437fab2d336 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -155,8 +155,6 @@ vstring_buffer(npy_string_arena *arena, _npy_static_string_u *string) return (char *)((size_t)arena->buffer + string->vstring.offset); } -#define ARENA_EXPAND_FACTOR 1.25 - static char * arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size) { @@ -168,24 +166,17 @@ arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size) else { string_storage_size = size + sizeof(size_t); } - if ((arena->size - arena->cursor) <= string_storage_size) { - // realloc the buffer so there is enough room - // first guess is to double the size of the buffer - size_t newsize; - if (arena->size == 0) { - newsize = string_storage_size; - } - else if (((ARENA_EXPAND_FACTOR * arena->size) - arena->cursor) > - string_storage_size) { - newsize = ARENA_EXPAND_FACTOR * arena->size; + if ((arena->size - arena->cursor) < string_storage_size) { + size_t minsize = arena->cursor + string_storage_size; + 
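/*
 * Hedged annotation on the overflow checks that follow: arena sizes are
 * unsigned (size_t), so an overflowing sum wraps around instead of going
 * negative, and a wrapped result is necessarily smaller than either operand.
 * The same pattern guards both the minimum size and the 25% headroom
 * (names as in the surrounding code):
 *
 *     size_t minsize = arena->cursor + string_storage_size;  // may wrap
 *     if (minsize < arena->cursor) {
 *         return NULL;  // wrapped, so the request cannot be satisfied
 *     }
 *     size_t newsize = minsize + minsize / 4;  // 1.25x growth, may also wrap
 *     if (newsize < minsize) {
 *         return NULL;
 *     }
 */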
if (minsize < arena->cursor) { + return NULL; // overflow means out of memory } - else { - newsize = arena->size + string_storage_size; - } - if ((arena->cursor + size) >= newsize) { - // need extra room beyond the expansion factor, leave some padding - newsize = ARENA_EXPAND_FACTOR * (arena->cursor + size); + // Allocate 25% more than needed for this string. + size_t newsize = minsize + minsize / 4; + if (newsize < minsize) { + return NULL; // overflow means out of memory } + // passing a NULL buffer to realloc is the same as malloc char *newbuf = r(arena->buffer, newsize); if (newbuf == NULL) { @@ -404,7 +395,7 @@ NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) } } -static const char * const EMPTY_STRING = ""; +static const char EMPTY_STRING[] = ""; /*NUMPY_API * Extract the packed contents of *packed_string* into *unpacked_string*. @@ -478,7 +469,7 @@ heap_or_arena_allocate(npy_string_allocator *allocator, if (*flags == 0) { // string isn't previously allocated, so add to existing arena allocation char *ret = arena_malloc(arena, allocator->realloc, sizeof(char) * size); - if (size < NPY_MEDIUM_STRING_MAX_SIZE) { + if (size <= NPY_MEDIUM_STRING_MAX_SIZE) { *flags = NPY_STRING_INITIALIZED; } else { diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index 9236476c4213..ea6cac08f78b 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -5,6 +5,7 @@ #include #include "npy_config.h" +#include "npy_pycompat.h" #include "numpy/arrayobject.h" #define NPY_NUMBER_MAX(a, b) ((a) > (b) ? (a) : (b)) @@ -115,10 +116,14 @@ check_unique_temporary(PyObject *lhs) #if PY_VERSION_HEX == 0x030E00A7 && !defined(PYPY_VERSION) #error "NumPy is broken on CPython 3.14.0a7, please update to a newer version" #elif PY_VERSION_HEX >= 0x030E00B1 && !defined(PYPY_VERSION) + // Python 3.14 changed the semantics for reference counting temporaries // see https://github.com/python/cpython/issues/133164 return PyUnstable_Object_IsUniqueReferencedTemporary(lhs); #else - return 1; + // equivalent to Py_REFCNT(lhs) == 1 except on 3.13t + // we need to use the backport on 3.13t because + // this function was first exposed in 3.14 + return PyUnstable_Object_IsUniquelyReferenced(lhs); #endif } @@ -303,13 +308,13 @@ can_elide_temp(PyObject *olhs, PyObject *orhs, int *cannot) * array of a basic type, own its data and size larger than threshold */ PyArrayObject *alhs = (PyArrayObject *)olhs; - if (Py_REFCNT(olhs) != 1 || !PyArray_CheckExact(olhs) || + if (!check_unique_temporary(olhs) || + !PyArray_CheckExact(olhs) || !PyArray_ISNUMBER(alhs) || !PyArray_CHKFLAGS(alhs, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(alhs) || PyArray_CHKFLAGS(alhs, NPY_ARRAY_WRITEBACKIFCOPY) || - PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES || - !check_unique_temporary(olhs)) { + PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) { return 0; } if (PyArray_CheckExact(orhs) || @@ -382,12 +387,12 @@ NPY_NO_EXPORT int can_elide_temp_unary(PyArrayObject * m1) { int cannot; - if (Py_REFCNT(m1) != 1 || !PyArray_CheckExact(m1) || + if (!check_unique_temporary((PyObject *)m1) || + !PyArray_CheckExact(m1) || !PyArray_ISNUMBER(m1) || !PyArray_CHKFLAGS(m1, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(m1) || - PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES || - !check_unique_temporary((PyObject *)m1)) { + PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES) { return 0; } if (check_callers(&cannot)) { diff --git a/numpy/_core/src/multiarray/textreading/conversions.h 
b/numpy/_core/src/multiarray/textreading/conversions.h index 09f2510413b5..e30b28a9a7af 100644 --- a/numpy/_core/src/multiarray/textreading/conversions.h +++ b/numpy/_core/src/multiarray/textreading/conversions.h @@ -1,12 +1,12 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_CONVERSIONS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_CONVERSIONS_H_ -#include - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" +#include + #include "textreading/parser_config.h" NPY_NO_EXPORT int diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index c459fa826e53..7f3797b58928 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -8,6 +8,7 @@ #include "numpy/npy_3kcompat.h" #include "npy_pycompat.h" #include "alloc.h" +#include "shape.h" // For PyArray_Resize_int #include #include @@ -61,7 +62,7 @@ create_conv_funcs( Py_ssize_t pos = 0; int error = 0; Py_BEGIN_CRITICAL_SECTION(converters); - while (PyDict_Next(converters, &pos, &key, &value)) { + while (PyDict_Next(converters, &pos, &key, &value)) { // noqa: borrowed-ref OK Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, @@ -118,7 +119,7 @@ create_conv_funcs( if (error) { goto error; } - + return conv_funcs; error: @@ -156,7 +157,7 @@ create_conv_funcs( * returned array can differ for strings. * @param homogeneous Whether the datatype of the array is not homogeneous, * i.e. not structured. In this case the number of columns has to be - * discovered an the returned array will be 2-dimensional rather than + * discovered and the returned array will be 2-dimensional rather than * 1-dimensional. * * @returns Returns the result as an array object or NULL on error. The result @@ -175,10 +176,9 @@ read_rows(stream *s, npy_intp row_size = out_descr->elsize; PyObject **conv_funcs = NULL; - bool needs_init = PyDataType_FLAGCHK(out_descr, NPY_NEEDS_INIT); - int ndim = homogeneous ? 2 : 1; npy_intp result_shape[2] = {0, 1}; + PyArray_Dims new_dims = {result_shape, ndim}; /* for resizing */ bool data_array_allocated = data_array == NULL; /* Make sure we own `data_array` for the purpose of error handling */ @@ -311,9 +311,6 @@ read_rows(stream *s, if (data_array == NULL) { goto error; } - if (needs_init) { - memset(PyArray_BYTES(data_array), 0, PyArray_NBYTES(data_array)); - } } else { assert(max_rows >=0); @@ -354,22 +351,15 @@ read_rows(stream *s, "providing a maximum number of rows to read may help."); goto error; } - - char *new_data = PyDataMem_UserRENEW( - PyArray_BYTES(data_array), alloc_size ? alloc_size : 1, - PyArray_HANDLER(data_array)); - if (new_data == NULL) { - PyErr_NoMemory(); + /* + * Resize the array. + */ + result_shape[0] = new_rows; + if (PyArray_Resize_int(data_array, &new_dims, 0) < 0) { goto error; } - /* Replace the arrays data since it may have changed */ - ((PyArrayObject_fields *)data_array)->data = new_data; - ((PyArrayObject_fields *)data_array)->dimensions[0] = new_rows; - data_ptr = new_data + row_count * row_size; + data_ptr = (char *)PyArray_DATA(data_array) + row_count * row_size; data_allocated_rows = new_rows; - if (needs_init) { - memset(data_ptr, '\0', (new_rows - row_count) * row_size); - } } for (Py_ssize_t i = 0; i < actual_num_fields; ++i) { @@ -474,20 +464,13 @@ read_rows(stream *s, /* * Note that if there is no data, `data_array` may still be NULL and - * row_count is 0. 
In that case, always realloc just in case. + * row_count is 0. In that case, always resize just in case. */ if (data_array_allocated && data_allocated_rows != row_count) { - size_t size = row_count * row_size; - char *new_data = PyDataMem_UserRENEW( - PyArray_BYTES(data_array), size ? size : 1, - PyArray_HANDLER(data_array)); - if (new_data == NULL) { - Py_DECREF(data_array); - PyErr_NoMemory(); - return NULL; + result_shape[0] = row_count; + if (PyArray_Resize_int(data_array, &new_dims, 0) < 0) { + goto error; } - ((PyArrayObject_fields *)data_array)->data = new_data; - ((PyArrayObject_fields *)data_array)->dimensions[0] = row_count; } /* diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index f36acfdef49a..8fc5b580961e 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -3,143 +3,475 @@ #include -#include +#include +#include +#include #include +#include +#include +#include +#include #include #include "numpy/arrayobject.h" +#include "gil_utils.h" +#include "raii_utils.hpp" +extern "C" { + #include "fnv.h" + #include "npy_argparse.h" + #include "numpy/npy_math.h" + #include "numpy/halffloat.h" +} -// This is to use RAII pattern to handle cpp exceptions while avoiding memory leaks. -// Adapted from https://stackoverflow.com/a/25510879/2536294 -template -struct FinalAction { - FinalAction(F f) : clean_{f} {} - ~FinalAction() { clean_(); } - private: - F clean_; -}; +// HASH_TABLE_INITIAL_BUCKETS is the reserve hashset capacity used in the +// std::unordered_set instances in the various unique_* functions. +// We use min(input_size, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket +// count: +// - Reserving for all elements (isize) may over-allocate when there are few +// unique values. +// - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps +// memory usage reasonable (4 KiB for pointers). +// See https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 +const npy_intp HASH_TABLE_INITIAL_BUCKETS = 1024; + +// +// Create a 1-d array with the given length that has the same +// dtype as the input `arr`. +// +static inline PyArrayObject * +empty_array_like(PyArrayObject *arr, npy_intp length) +{ + PyArray_Descr *descr = PyArray_DESCR(arr); + Py_INCREF(descr); -template -FinalAction finally(F f) { - return FinalAction(f); + // Create the output array. + PyArrayObject *res_obj = + reinterpret_cast( + PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + NPY_ARRAY_WRITEABLE, // flags + NULL // obj + ) + ); + return res_obj; +} + +template +size_t hash_integer(const T *value, npy_bool equal_nan) { + return std::hash{}(*value); } -template +template +size_t hash_complex(const T *value, npy_bool equal_nan) { + std::complex z = *reinterpret_cast *>(value); + int hasnan = npy_isnan(z.real()) || npy_isnan(z.imag()); + if (equal_nan && hasnan) { + return 0; + } + + // Now, equal_nan is false or neither of the values is not NaN. + // So we don't need to worry about NaN here. + + // Convert -0.0 to 0.0. 
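/*
 * Hedged note (ours): the normalization below is needed because IEEE 754
 * comparison treats 0.0 and -0.0 as equal while their bit patterns differ,
 * and the hashset requires that equal values hash equally:
 *
 *     double pz = 0.0, nz = -0.0;
 *     // pz == nz is true, yet
 *     // npy_fnv1a((unsigned char *)&pz, sizeof(pz)) and
 *     // npy_fnv1a((unsigned char *)&nz, sizeof(nz)) differ, since the
 *     // sign bits differ.
 *
 * Canonicalizing both components to +0.0 before hashing the bytes keeps
 * hashing consistent with equality.
 */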
+ if (z.real() == 0.0) { + z.real(NPY_PZERO); + } + if (z.imag() == 0.0) { + z.imag(NPY_PZERO); + } + + size_t hash = npy_fnv1a(reinterpret_cast(&z), sizeof(z)); + return hash; +} + +size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan) { + std::complex z = + *reinterpret_cast *>(value); + int hasnan = npy_isnan(z.real()) || npy_isnan(z.imag()); + if (equal_nan && hasnan) { + return 0; + } + + // Now, equal_nan is false or neither of the values is not NaN. + // So we don't need to worry about NaN here. + + // Convert -0.0 to 0.0. + if (z.real() == 0.0) { + z.real(NPY_PZEROL); + } + if (z.imag() == 0.0) { + z.imag(NPY_PZEROL); + } + + // Some floating-point complex dtypes (e.g., npy_complex256) include undefined or + // unused bits in their binary representation + // (see: https://github.com/numpy/numpy/blob/main/numpy/_core/src/npymath/npy_math_private.h#L254-L261). + // Because hashing the raw bit pattern would make the hash depend on those + // undefined bits, we extract the mantissa, exponent, and sign components + // explicitly and pack them into a buffer to ensure the hash is well-defined. + #if defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \ + defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \ + defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) + + constexpr size_t SIZEOF_LDOUBLE_MAN = sizeof(ldouble_man_t); + constexpr size_t SIZEOF_LDOUBLE_EXP = sizeof(ldouble_exp_t); + constexpr size_t SIZEOF_LDOUBLE_SIGN = sizeof(ldouble_sign_t); + constexpr size_t SIZEOF_BUFFER = 2 * (SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_EXP + SIZEOF_LDOUBLE_SIGN); + unsigned char buffer[SIZEOF_BUFFER]; + + union IEEEl2bitsrep bits_real{z.real()}, bits_imag{z.imag()}; + size_t offset = 0; + + for (const IEEEl2bitsrep &bits: {bits_real, bits_imag}) { + ldouble_man_t manh = GET_LDOUBLE_MANH(bits); + ldouble_man_t manl = GET_LDOUBLE_MANL(bits); + ldouble_exp_t exp = GET_LDOUBLE_EXP(bits); + ldouble_sign_t sign = GET_LDOUBLE_SIGN(bits); + + std::memcpy(buffer + offset, &manh, SIZEOF_LDOUBLE_MAN); + offset += SIZEOF_LDOUBLE_MAN; + std::memcpy(buffer + offset, &manl, SIZEOF_LDOUBLE_MAN); + offset += SIZEOF_LDOUBLE_MAN; + std::memcpy(buffer + offset, &exp, SIZEOF_LDOUBLE_EXP); + offset += SIZEOF_LDOUBLE_EXP; + std::memcpy(buffer + offset, &sign, SIZEOF_LDOUBLE_SIGN); + offset += SIZEOF_LDOUBLE_SIGN; + } + #else + + const unsigned char* buffer = reinterpret_cast(&z); + constexpr size_t SIZEOF_BUFFER = sizeof(z); + + #endif + + size_t hash = npy_fnv1a(buffer, SIZEOF_BUFFER); + + return hash; +} + +template +int equal_integer(const T *lhs, const T *rhs, npy_bool equal_nan) { + return *lhs == *rhs; +} + +template +int equal_complex(const T *lhs, const T *rhs, npy_bool equal_nan) { + S lhs_real = real(*lhs); + S lhs_imag = imag(*lhs); + int lhs_isnan = npy_isnan(lhs_real) || npy_isnan(lhs_imag); + S rhs_real = real(*rhs); + S rhs_imag = imag(*rhs); + int rhs_isnan = npy_isnan(rhs_real) || npy_isnan(rhs_imag); + + if (lhs_isnan && rhs_isnan) { + return equal_nan; + } + if (lhs_isnan || rhs_isnan) { + return false; + } + // Now both lhs and rhs are not NaN. 
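/*
 * Hedged summary (ours) of the resulting semantics for complex values with
 * a NaN component:
 *
 *     equal_nan = true:  every NaN-containing value hashes to 0 and compares
 *                        equal, so they all collapse into a single bucket;
 *     equal_nan = false: NaN-containing values never compare equal, which
 *                        matches np.unique(..., equal_nan=False).
 */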
+ return (lhs_real == rhs_real) && (lhs_imag == rhs_imag); +} + +template +void copy_integer(char *data, T *value) { + std::copy_n(value, 1, (T *)data); + return; +} + +template < + typename S, + typename T, + S (*real)(T), + S (*imag)(T), + void (*setreal)(T *, const S), + void (*setimag)(T *, const S) +> +void copy_complex(char *data, T *value) { + setreal((T *)data, real(*value)); + setimag((T *)data, imag(*value)); + return; +} + +template < + typename T, + size_t (*hash_func)(const T *, npy_bool), + int (*equal_func)(const T *, const T *, npy_bool), + void (*copy_func)(char *, T *) +> static PyObject* -unique(PyArrayObject *self) +unique_numeric(PyArrayObject *self, npy_bool equal_nan) { - /* This function takes a numpy array and returns a numpy array containing - the unique values. - - It assumes the numpy array includes data that can be viewed as unsigned integers - of a certain size (sizeof(T)). - - It doesn't need to know the actual type, since it needs to find unique values - among binary representations of the input data. This means it won't apply to - custom or complicated dtypes or string values. - */ - NPY_ALLOW_C_API_DEF; - std::unordered_set hashset; - - NpyIter *iter = NpyIter_New(self, NPY_ITER_READONLY | - NPY_ITER_EXTERNAL_LOOP | - NPY_ITER_REFS_OK | - NPY_ITER_ZEROSIZE_OK | - NPY_ITER_GROWINNER, - NPY_KEEPORDER, NPY_NO_CASTING, - NULL); - // Making sure the iterator is deallocated when the function returns, with - // or w/o an exception - auto iter_dealloc = finally([&]() { NpyIter_Deallocate(iter); }); - if (iter == NULL) { - return NULL; + /* + * Returns a new NumPy array containing the unique values of the input + * array of numeric (integer or complex). + * This function uses hashing to identify uniqueness efficiently. + */ + + auto hash = [equal_nan](const T *value) -> size_t { + return hash_func(value, equal_nan); + }; + auto equal = [equal_nan](const T *lhs, const T *rhs) -> bool { + return equal_func(lhs, rhs, equal_nan); + }; + + using set_type = std::unordered_set; + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast(idata)); + } } - NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); + if (res_obj == NULL) { return NULL; } - char **dataptr = NpyIter_GetDataPtrArray(iter); - npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter); - npy_intp *innersizeptr = NpyIter_GetInnerLoopSizePtr(iter); - - // release the GIL - PyThreadState *_save; - _save = PyEval_SaveThread(); - // Making sure the GIL is re-acquired when the function returns, with - // or w/o an exception - auto grab_gil = finally([&]() { PyEval_RestoreThread(_save); }); - // first we put the data in a hash map - - if (NpyIter_GetIterSize(iter) > 0) { - do { - char* data = *dataptr; - npy_intp stride = *strideptr; - npy_intp count = *innersizeptr; - - while (count--) { - hashset.insert(*((T *) data)); - data += stride; - } - } while (iternext(iter)); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata 
+= ostride) { + copy_func(odata, *it); + } } - npy_intp length = hashset.size(); + return reinterpret_cast(res_obj); +} + +template +static PyObject* +unique_string(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input + * array of fixed size strings. + * This function uses hashing to identify uniqueness efficiently. + */ - NPY_ALLOW_C_API; PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. - NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); - NPY_DISABLE_C_API; + // variables for the string + npy_intp itemsize = descr->elsize; + npy_intp num_chars = itemsize / sizeof(T); + + auto hash = [num_chars](const T *value) -> size_t { + return npy_fnv1a(value, num_chars * sizeof(T)); + }; + auto equal = [itemsize](const T *lhs, const T *rhs) -> bool { + return std::memcmp(lhs, rhs, itemsize) == 0; + }; + + using set_type = std::unordered_set; + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + np::raii::SaveThreadState save_thread_state{}; + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast(idata)); + } + } + + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - // then we iterate through the map's keys to get the unique values - T* data = (T *)PyArray_DATA((PyArrayObject *)res_obj); - auto it = hashset.begin(); - size_t i = 0; - for (; it != hashset.end(); it++, i++) { - data[i] = *it; + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + std::memcpy(odata, *it, itemsize); + } } - return res_obj; + return reinterpret_cast(res_obj); +} + +static PyObject* +unique_vstring(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input array. + * This function uses hashing to identify uniqueness efficiently. + */ + + auto hash = [equal_nan](const npy_static_string *value) -> size_t { + if (value->buf == NULL) { + if (equal_nan) { + return 0; + } else { + return std::hash{}(value); + } + } + return npy_fnv1a(value->buf, value->size * sizeof(char)); + }; + auto equal = [equal_nan](const npy_static_string *lhs, const npy_static_string *rhs) -> bool { + if (lhs->buf == NULL && rhs->buf == NULL) { + if (equal_nan) { + return true; + } else { + return lhs == rhs; + } + } + if (lhs->buf == NULL || rhs->buf == NULL) { + return false; + } + if (lhs->size != rhs->size) { + return false; + } + return std::memcmp(lhs->buf, rhs->buf, lhs->size) == 0; + }; + + npy_intp isize = PyArray_SIZE(self); + // unpacked_strings must live as long as hashset because hashset points + // to values in this vector. 
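/*
 * Hedged illustration (ours) of the hazard the comment above warns about:
 *
 *     std::vector<npy_static_string> v(1);
 *     std::unordered_set<const npy_static_string *> s = { &v[0] };
 *     v.push_back({0, NULL});  // may reallocate; &v[0] inside s now dangles
 *
 * Sizing the vector once up front (isize elements, never grown) is what
 * keeps the pointers stored in the hashset valid.
 */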
+ std::vector unpacked_strings(isize, {0, NULL}); + + using set_type = std::unordered_set; + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + PyArray_StringDTypeObject *descr = + reinterpret_cast(PyArray_DESCR(self)); + np::raii::NpyStringAcquireAllocator alloc(descr); + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + + for (npy_intp i = 0; i < isize; i++, idata += istride) { + npy_packed_static_string *packed_string = + reinterpret_cast(idata); + int is_null = NpyString_load(alloc.allocator(), packed_string, + &unpacked_strings[i]); + if (is_null == -1) { + // Unexpected error. Throw a C++ exception that will be caught + // by the caller of unique_vstring() and converted into a Python + // RuntimeError. + throw std::runtime_error("Failed to load string from packed " + "static string."); + } + hashset.insert(&unpacked_strings[i]); + } + } + + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); + if (res_obj == NULL) { + return NULL; + } + + { + PyArray_StringDTypeObject *res_descr = + reinterpret_cast(PyArray_DESCR(res_obj)); + np::raii::NpyStringAcquireAllocator alloc(res_descr); + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + npy_packed_static_string *packed_string = + reinterpret_cast(odata); + int pack_status = 0; + if ((*it)->buf == NULL) { + pack_status = NpyString_pack_null(alloc.allocator(), packed_string); + } else { + pack_status = NpyString_pack(alloc.allocator(), packed_string, + (*it)->buf, (*it)->size); + } + if (pack_status == -1) { + // string packing failed + return NULL; + } + } + } + return reinterpret_cast(res_obj); } // this map contains the functions used for each item size. 
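/*
 * Hedged sketch (ours) of how the map is consumed; the wrapper below does
 * roughly
 *
 *     auto it = unique_funcs.find(PyArray_TYPE(arr));
 *     if (it == unique_funcs.end()) {
 *         Py_RETURN_NOTIMPLEMENTED;  // dtype not supported
 *     }
 *     PyObject *res = it->second(arr, equal_nan);
 *
 * so supporting another fixed-size dtype amounts to one new entry plus
 * hash/equal/copy helpers with the matching signatures.
 */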
-typedef std::function<PyObject *(PyArrayObject *)> function_type;
+typedef std::function<PyObject *(PyArrayObject *, npy_bool)> function_type;
 std::unordered_map<int, function_type> unique_funcs = {
-    {NPY_BYTE, unique<npy_byte>},
-    {NPY_UBYTE, unique<npy_ubyte>},
-    {NPY_SHORT, unique<npy_short>},
-    {NPY_USHORT, unique<npy_ushort>},
-    {NPY_INT, unique<npy_int>},
-    {NPY_UINT, unique<npy_uint>},
-    {NPY_LONG, unique<npy_long>},
-    {NPY_ULONG, unique<npy_ulong>},
-    {NPY_LONGLONG, unique<npy_longlong>},
-    {NPY_ULONGLONG, unique<npy_ulonglong>},
-    {NPY_INT8, unique<npy_int8>},
-    {NPY_INT16, unique<npy_int16>},
-    {NPY_INT32, unique<npy_int32>},
-    {NPY_INT64, unique<npy_int64>},
-    {NPY_UINT8, unique<npy_uint8>},
-    {NPY_UINT16, unique<npy_uint16>},
-    {NPY_UINT32, unique<npy_uint32>},
-    {NPY_UINT64, unique<npy_uint64>},
-    {NPY_DATETIME, unique<npy_datetime>},
+    {NPY_BYTE, unique_numeric<npy_byte, hash_integer<npy_byte>, equal_integer<npy_byte>, copy_integer<npy_byte>>},
+    {NPY_UBYTE, unique_numeric<npy_ubyte, hash_integer<npy_ubyte>, equal_integer<npy_ubyte>, copy_integer<npy_ubyte>>},
+    {NPY_SHORT, unique_numeric<npy_short, hash_integer<npy_short>, equal_integer<npy_short>, copy_integer<npy_short>>},
+    {NPY_USHORT, unique_numeric<npy_ushort, hash_integer<npy_ushort>, equal_integer<npy_ushort>, copy_integer<npy_ushort>>},
+    {NPY_INT, unique_numeric<npy_int, hash_integer<npy_int>, equal_integer<npy_int>, copy_integer<npy_int>>},
+    {NPY_UINT, unique_numeric<npy_uint, hash_integer<npy_uint>, equal_integer<npy_uint>, copy_integer<npy_uint>>},
+    {NPY_LONG, unique_numeric<npy_long, hash_integer<npy_long>, equal_integer<npy_long>, copy_integer<npy_long>>},
+    {NPY_ULONG, unique_numeric<npy_ulong, hash_integer<npy_ulong>, equal_integer<npy_ulong>, copy_integer<npy_ulong>>},
+    {NPY_LONGLONG, unique_numeric<npy_longlong, hash_integer<npy_longlong>, equal_integer<npy_longlong>, copy_integer<npy_longlong>>},
+    {NPY_ULONGLONG, unique_numeric<npy_ulonglong, hash_integer<npy_ulonglong>, equal_integer<npy_ulonglong>, copy_integer<npy_ulonglong>>},
+    {NPY_CFLOAT, unique_numeric<
+            npy_cfloat,
+            hash_complex<float, npy_cfloat>,
+            equal_complex<float, npy_cfloat, npy_crealf, npy_cimagf>,
+            copy_complex<float, npy_cfloat, npy_crealf, npy_cimagf, npy_csetrealf, npy_csetimagf>
+        >
+    },
+    {NPY_CDOUBLE, unique_numeric<
+            npy_cdouble,
+            hash_complex<double, npy_cdouble>,
+            equal_complex<double, npy_cdouble, npy_creal, npy_cimag>,
+            copy_complex<double, npy_cdouble, npy_creal, npy_cimag, npy_csetreal, npy_csetimag>
+        >
+    },
+    {NPY_CLONGDOUBLE, unique_numeric<
+            npy_clongdouble,
+            hash_complex_clongdouble,
+            equal_complex<npy_longdouble, npy_clongdouble, npy_creall, npy_cimagl>,
+            copy_complex<npy_longdouble, npy_clongdouble, npy_creall, npy_cimagl, npy_csetreall, npy_csetimagl>
+        >
+    },
+    {NPY_INT8, unique_numeric<npy_int8, hash_integer<npy_int8>, equal_integer<npy_int8>, copy_integer<npy_int8>>},
+    {NPY_INT16, unique_numeric<npy_int16, hash_integer<npy_int16>, equal_integer<npy_int16>, copy_integer<npy_int16>>},
+    {NPY_INT32, unique_numeric<npy_int32, hash_integer<npy_int32>, equal_integer<npy_int32>, copy_integer<npy_int32>>},
+    {NPY_INT64, unique_numeric<npy_int64, hash_integer<npy_int64>, equal_integer<npy_int64>, copy_integer<npy_int64>>},
+    {NPY_UINT8, unique_numeric<npy_uint8, hash_integer<npy_uint8>, equal_integer<npy_uint8>, copy_integer<npy_uint8>>},
+    {NPY_UINT16, unique_numeric<npy_uint16, hash_integer<npy_uint16>, equal_integer<npy_uint16>, copy_integer<npy_uint16>>},
+    {NPY_UINT32, unique_numeric<npy_uint32, hash_integer<npy_uint32>, equal_integer<npy_uint32>, copy_integer<npy_uint32>>},
+    {NPY_UINT64, unique_numeric<npy_uint64, hash_integer<npy_uint64>, equal_integer<npy_uint64>, copy_integer<npy_uint64>>},
+    {NPY_DATETIME, unique_numeric<npy_datetime, hash_integer<npy_datetime>, equal_integer<npy_datetime>, copy_integer<npy_datetime>>},
+    {NPY_COMPLEX64, unique_numeric<
+            npy_complex64,
+            hash_complex<float, npy_complex64>,
+            equal_complex<float, npy_complex64, npy_crealf, npy_cimagf>,
+            copy_complex<float, npy_complex64, npy_crealf, npy_cimagf, npy_csetrealf, npy_csetimagf>
+        >
+    },
+    {NPY_COMPLEX128, unique_numeric<
+            npy_complex128,
+            hash_complex<double, npy_complex128>,
+            equal_complex<double, npy_complex128, npy_creal, npy_cimag>,
+            copy_complex<double, npy_complex128, npy_creal, npy_cimag, npy_csetreal, npy_csetimag>
+        >
+    },
+    {NPY_STRING, unique_string<npy_byte>},
+    {NPY_UNICODE, unique_string<npy_ucs4>},
+    {NPY_VSTRING, unique_vstring},
 };
@@ -154,30 +486,40 @@ std::unordered_map<int, function_type> unique_funcs = {
  * type is unsupported or `NULL` with an error set.
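 *
 * (Hedged aside, ours: with the fastcall signature used below,
 * `PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames`, the
 * corresponding PyMethodDef entry is expected to be registered as
 *
 *     {"_unique_hash", (PyCFunction)array__unique_hash,
 *      METH_FASTCALL | METH_KEYWORDS, NULL},
 *
 * the calling convention that npy_parse_arguments is designed for.)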
*/ extern "C" NPY_NO_EXPORT PyObject * -array__unique_hash(PyObject *NPY_UNUSED(module), PyObject *arr_obj) +array__unique_hash(PyObject *NPY_UNUSED(module), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - if (!PyArray_Check(arr_obj)) { - PyErr_SetString(PyExc_TypeError, - "_unique_hash() requires a NumPy array input."); + PyArrayObject *arr = NULL; + npy_bool equal_nan = NPY_TRUE; // default to True + + NPY_PREPARE_ARGPARSER; + if (npy_parse_arguments("_unique_hash", args, len_args, kwnames, + "arr", &PyArray_Converter, &arr, + "|equal_nan", &PyArray_BoolConverter, &equal_nan, + NULL, NULL, NULL + ) < 0 + ) { + Py_XDECREF(arr); return NULL; } - PyArrayObject *arr = (PyArrayObject *)arr_obj; + PyObject *result = NULL; try { auto type = PyArray_TYPE(arr); // we only support data types present in our unique_funcs map if (unique_funcs.find(type) == unique_funcs.end()) { Py_RETURN_NOTIMPLEMENTED; } - - return unique_funcs[type](arr); + result = unique_funcs[type](arr, equal_nan); } catch (const std::bad_alloc &e) { PyErr_NoMemory(); - return NULL; + result = NULL; } catch (const std::exception &e) { PyErr_SetString(PyExc_RuntimeError, e.what()); - return NULL; + result = NULL; } + Py_DECREF(arr); + return result; } diff --git a/numpy/_core/src/multiarray/unique.h b/numpy/_core/src/multiarray/unique.h index 3e258405e8f4..7b3fb143ada4 100644 --- a/numpy/_core/src/multiarray/unique.h +++ b/numpy/_core/src/multiarray/unique.h @@ -5,7 +5,8 @@ extern "C" { #endif -PyObject* array__unique_hash(PyObject *NPY_UNUSED(dummy), PyObject *args); +PyObject* array__unique_hash(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames); #ifdef __cplusplus } diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 445f7ad7fe67..71c95a8ae39c 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -306,8 +306,9 @@ PyArray_RegisterDataType(PyArray_DescrProto *descr_proto) descr->type_num = typenum; /* update prototype to notice duplicate registration */ descr_proto->type_num = typenum; - if (dtypemeta_wrap_legacy_descriptor( - descr, descr_proto->f, &PyArrayDescr_Type, name, NULL) < 0) { + PyArray_DTypeMeta *wrapped_dtype = dtypemeta_wrap_legacy_descriptor( + descr, descr_proto->f, &PyArrayDescr_Type, name, NULL); + if (wrapped_dtype == NULL) { descr->type_num = -1; NPY_NUMUSERTYPES--; /* Override the type, it might be wrong and then decref crashes */ @@ -344,7 +345,7 @@ static int _warn_if_cast_exists_already( if (to_DType == NULL) { return -1; } - PyObject *cast_impl = PyDict_GetItemWithError( + PyObject *cast_impl = PyDict_GetItemWithError( // noqa: borrowed-ref OK NPY_DT_SLOTS(NPY_DTYPE(descr))->castingimpls, (PyObject *)to_DType); Py_DECREF(to_DType); if (cast_impl == NULL) { diff --git a/numpy/_core/src/npymath/halffloat.cpp b/numpy/_core/src/npymath/halffloat.cpp index aa582c1b9517..9289a659f5f5 100644 --- a/numpy/_core/src/npymath/halffloat.cpp +++ b/numpy/_core/src/npymath/halffloat.cpp @@ -198,41 +198,21 @@ npy_half npy_half_divmod(npy_half h1, npy_half h2, npy_half *modulus) npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(f))); - } - else { - return half_private::FromFloatBits(f); - } + return BitCast(Half(BitCast(f))); } npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(d))); - } - else { - return 
half_private::FromDoubleBits(d); - } + return BitCast(Half(BitCast(d))); } npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToFloatBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToDoubleBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } diff --git a/numpy/_core/src/npysort/mergesort.cpp b/numpy/_core/src/npysort/mergesort.cpp index 2fac0ccfafcd..1cfe04b1d266 100644 --- a/numpy/_core/src/npysort/mergesort.cpp +++ b/numpy/_core/src/npysort/mergesort.cpp @@ -337,7 +337,7 @@ string_amergesort_(type *v, npy_intp *tosort, npy_intp num, void *varr) static void npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize, - PyArray_CompareFunc *cmp, PyArrayObject *arr) + PyArray_CompareFunc *cmp, void *arr) { char *pi, *pj, *pk, *pm; @@ -383,9 +383,19 @@ npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize, NPY_NO_EXPORT int npy_mergesort(void *start, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + void *arr = varr; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(arr, &elsize, &cmp); + + return npy_mergesort_impl(start, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_mergesort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *pl = (char *)start; char *pr = pl + num * elsize; char *pw; @@ -413,7 +423,7 @@ npy_mergesort(void *start, npy_intp num, void *varr) static void npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw, - npy_intp elsize, PyArray_CompareFunc *cmp, PyArrayObject *arr) + npy_intp elsize, PyArray_CompareFunc *cmp, void *arr) { char *vp; npy_intp vi, *pi, *pj, *pk, *pm; @@ -459,9 +469,19 @@ npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw, NPY_NO_EXPORT int npy_amergesort(void *v, npy_intp *tosort, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + void *arr = varr; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(arr, &elsize, &cmp); + + return npy_amergesort_impl(v, tosort, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_amergesort_impl(void *v, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; npy_intp *pl, *pr, *pw; /* Items that have zero size don't make sense to sort */ diff --git a/numpy/_core/src/npysort/npysort_common.h b/numpy/_core/src/npysort/npysort_common.h index 0680ae52afe3..f2b99e3b7f66 100644 --- a/numpy/_core/src/npysort/npysort_common.h +++ b/numpy/_core/src/npysort/npysort_common.h @@ -1,8 +1,8 @@ #ifndef __NPY_SORT_COMMON_H__ #define __NPY_SORT_COMMON_H__ -#include #include +#include #include #include "dtypemeta.h" @@ -40,6 +40,20 @@ extern "C" { /* Need this for the argsort functions */ #define INTP_SWAP(a,b) {npy_intp tmp = (b); (b)=(a); (a) = tmp;} +/* + ****************************************************************************** + ** SORTING WRAPPERS ** + 
****************************************************************************** + */ + +static inline void +get_sort_data_from_array(void *varr, npy_intp *elsize, PyArray_CompareFunc **cmp) +{ + PyArrayObject *arr = (PyArrayObject *)varr; + *elsize = PyArray_ITEMSIZE(arr); + *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; +} + /* ***************************************************************************** ** COMPARISON FUNCTIONS ** diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index ddf4fce0c28b..2f5adde17b64 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ b/numpy/_core/src/npysort/quicksort.cpp @@ -508,9 +508,18 @@ string_aquicksort_(type *vv, npy_intp *tosort, npy_intp num, void *varr) NPY_NO_EXPORT int npy_quicksort(void *start, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(varr, &elsize, &cmp); + + return npy_quicksort_impl(start, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_quicksort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *vp; char *pl = (char *)start; char *pr = pl + (num - 1) * elsize; @@ -612,10 +621,19 @@ npy_quicksort(void *start, npy_intp num, void *varr) NPY_NO_EXPORT int npy_aquicksort(void *vv, npy_intp *tosort, npy_intp num, void *varr) { + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(varr, &elsize, &cmp); + + return npy_aquicksort_impl(vv, tosort, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_aquicksort_impl(void *vv, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *v = (char *)vv; - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; char *vp; npy_intp *pl = tosort; npy_intp *pr = tosort + num - 1; diff --git a/numpy/_core/src/npysort/timsort.cpp b/numpy/_core/src/npysort/timsort.cpp index 0f0f5721e7cf..0dfb4d32f64a 100644 --- a/numpy/_core/src/npysort/timsort.cpp +++ b/numpy/_core/src/npysort/timsort.cpp @@ -39,8 +39,9 @@ #include #include -/* enough for 32 * 1.618 ** 128 elements */ -#define TIMSORT_STACK_SIZE 128 +/* enough for 32 * 1.618 ** 128 elements. + If powersort was used in all cases, 90 would suffice, as 32 * 2 ** 90 >= 32 * 1.618 ** 128 */ +#define RUN_STACK_SIZE 128 static npy_intp compute_min_run(npy_intp num) @@ -58,6 +59,7 @@ compute_min_run(npy_intp num) typedef struct { npy_intp s; /* start pointer */ npy_intp l; /* length */ + int power; /* node "level" for powersort merge strategy */ } run; /* buffer for argsort. Declared here to avoid multiple declarations. */ @@ -383,60 +385,51 @@ merge_at_(type *arr, const run *stack, const npy_intp at, buffer_ *buffer) return 0; } -template +/* See https://github.com/python/cpython/blob/ea23c897cd25702e72a04e06664f6864f07a7c5d/Objects/listsort.txt +* for a detailed explanation. +* In CPython, *num* is called *n*, but we changed it for consistency with the NumPy implementation. 
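* Hedged worked example (ours, not from listsort.txt): take num = 16 with a
* leftmost run at s1 = 0 of length n1 = 4, followed by a run of length
* n2 = 4. Then a = 2*s1 + n1 = 4 and b = a + n1 + n2 = 12, i.e. twice the
* midpoints of the two runs. Iteration 1: a < 16 and b < 16, so both are
* doubled to a = 8, b = 24. Iteration 2: a < 16 but b >= 16, so the loop
* stops and powerloop returns 2. That is the first bit at which the scaled
* midpoints 4/32 = 0.00100 and 12/32 = 0.01100 (binary) differ, i.e. the
* depth of the merge-tree node separating the two runs.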
+*/ static int -try_collapse_(type *arr, run *stack, npy_intp *stack_ptr, buffer_ *buffer) +powerloop(npy_intp s1, npy_intp n1, npy_intp n2, npy_intp num) { - int ret; - npy_intp A, B, C, top; - top = *stack_ptr; - - while (1 < top) { - B = stack[top - 2].l; - C = stack[top - 1].l; - - if ((2 < top && stack[top - 3].l <= B + C) || - (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) { - A = stack[top - 3].l; - - if (A <= C) { - ret = merge_at_(arr, stack, top - 3, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 3].l += B; - stack[top - 2] = stack[top - 1]; - --top; - } - else { - ret = merge_at_(arr, stack, top - 2, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 2].l += C; - --top; - } + int result = 0; + npy_intp a = 2 * s1 + n1; /* 2*a */ + npy_intp b = a + n1 + n2; /* 2*b */ + for (;;) { + ++result; + if (a >= num) { /* both quotient bits are 1 */ + a -= num; + b -= num; } - else if (1 < top && B <= C) { - ret = merge_at_(arr, stack, top - 2, buffer); + else if (b >= num) { /* a/num bit is 0, b/num bit is 1 */ + break; + } + a <<= 1; + b <<= 1; + } + return result; +} +template +static int +found_new_run_(type *arr, run *stack, npy_intp *stack_ptr, npy_intp n2, + npy_intp num, buffer_ *buffer) +{ + int ret; + if (*stack_ptr > 0) { + npy_intp s1 = stack[*stack_ptr - 1].s; + npy_intp n1 = stack[*stack_ptr - 1].l; + int power = powerloop(s1, n1, n2, num); + while (*stack_ptr > 1 && stack[*stack_ptr - 2].power > power) { + ret = merge_at_(arr, stack, *stack_ptr - 2, buffer); if (NPY_UNLIKELY(ret < 0)) { return ret; } - - stack[top - 2].l += C; - --top; - } - else { - break; + stack[*stack_ptr - 2].l += stack[*stack_ptr - 1].l; + --(*stack_ptr); } + stack[*stack_ptr - 1].power = power; } - - *stack_ptr = top; return 0; } @@ -491,7 +484,7 @@ timsort_(void *start, npy_intp num) int ret; npy_intp l, n, stack_ptr, minrun; buffer_ buffer; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer.pw = NULL; buffer.size = 0; stack_ptr = 0; @@ -499,15 +492,14 @@ timsort_(void *start, npy_intp num) for (l = 0; l < num;) { n = count_run_((type *)start, l, num, minrun); + ret = found_new_run_((type *)start, stack, &stack_ptr, n, num, &buffer); + if (NPY_UNLIKELY(ret < 0)) + goto cleanup; + + // Push the new run onto the stack. 
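/*
 * Hedged trace (ours, with an assumed input) of how the stack evolves for
 * num = 16 and natural runs of lengths 4, 4, 8:
 *
 *     push run0                stack: [ {s=0, l=4} ]
 *     found_new_run_(n2=4):    run0 gets power = 2
 *     push run1                stack: [ {0,4,p=2}, {4,4} ]
 *     found_new_run_(n2=8):    boundary power = 1 < 2, so run0 and run1
 *                              are merged before run2 is pushed
 *     push run2                stack: [ {0,8,p=1}, {8,8} ]
 *
 * Each entry records the run start (s), its length (l), and the power of
 * the merge-tree node at its right boundary.
 */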
stack[stack_ptr].s = l; stack[stack_ptr].l = n; ++stack_ptr; - ret = try_collapse_((type *)start, stack, &stack_ptr, &buffer); - - if (NPY_UNLIKELY(ret < 0)) { - goto cleanup; - } - l += n; } @@ -790,59 +782,24 @@ amerge_at_(type *arr, npy_intp *tosort, const run *stack, const npy_intp at, template static int -atry_collapse_(type *arr, npy_intp *tosort, run *stack, npy_intp *stack_ptr, - buffer_intp *buffer) +afound_new_run_(type *arr, npy_intp *tosort, run *stack, npy_intp *stack_ptr, npy_intp n2, + npy_intp num, buffer_intp *buffer) { int ret; - npy_intp A, B, C, top; - top = *stack_ptr; - - while (1 < top) { - B = stack[top - 2].l; - C = stack[top - 1].l; - - if ((2 < top && stack[top - 3].l <= B + C) || - (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) { - A = stack[top - 3].l; - - if (A <= C) { - ret = amerge_at_(arr, tosort, stack, top - 3, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 3].l += B; - stack[top - 2] = stack[top - 1]; - --top; - } - else { - ret = amerge_at_(arr, tosort, stack, top - 2, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 2].l += C; - --top; - } - } - else if (1 < top && B <= C) { - ret = amerge_at_(arr, tosort, stack, top - 2, buffer); - + if (*stack_ptr > 0) { + npy_intp s1 = stack[*stack_ptr - 1].s; + npy_intp n1 = stack[*stack_ptr - 1].l; + int power = powerloop(s1, n1, n2, num); + while (*stack_ptr > 1 && stack[*stack_ptr - 2].power > power) { + ret = amerge_at_(arr, tosort, stack, *stack_ptr - 2, buffer); if (NPY_UNLIKELY(ret < 0)) { return ret; } - - stack[top - 2].l += C; - --top; - } - else { - break; + stack[*stack_ptr - 2].l += stack[*stack_ptr - 1].l; + --(*stack_ptr); } + stack[*stack_ptr - 1].power = power; } - - *stack_ptr = top; return 0; } @@ -897,7 +854,7 @@ atimsort_(void *v, npy_intp *tosort, npy_intp num) int ret; npy_intp l, n, stack_ptr, minrun; buffer_intp buffer; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer.pw = NULL; buffer.size = 0; stack_ptr = 0; @@ -905,16 +862,13 @@ atimsort_(void *v, npy_intp *tosort, npy_intp num) for (l = 0; l < num;) { n = acount_run_((type *)v, tosort, l, num, minrun); - stack[stack_ptr].s = l; - stack[stack_ptr].l = n; - ++stack_ptr; - ret = atry_collapse_((type *)v, tosort, stack, &stack_ptr, - &buffer); - + ret = afound_new_run_((type*)v, tosort, stack, &stack_ptr, n, num, &buffer); if (NPY_UNLIKELY(ret < 0)) { goto cleanup; } - + stack[stack_ptr].s = l; + stack[stack_ptr].l = n; + ++stack_ptr; l += n; } @@ -1371,7 +1325,7 @@ string_timsort_(void *start, npy_intp num, void *varr) size_t len = elsize / sizeof(type); int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; string_buffer_ buffer; /* Items that have zero size don't make sense to sort */ @@ -1800,7 +1754,7 @@ string_atimsort_(void *start, npy_intp *tosort, npy_intp num, void *varr) size_t len = elsize / sizeof(type); int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_intp buffer; /* Items that have zero size don't make sense to sort */ @@ -2253,7 +2207,7 @@ npy_timsort(void *start, npy_intp num, void *varr) PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_char buffer; /* Items that have zero size don't make sense to sort */ @@ -2689,7 +2643,7 @@ npy_atimsort(void *start, npy_intp *tosort, npy_intp num, void 
*varr) PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_intp buffer; /* Items that have zero size don't make sense to sort */ diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index c306ac581a59..6a7a01da4b0d 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit c306ac581a59f89585d778254c4ed7197e64ba2d +Subproject commit 6a7a01da4b0dfde108aa626a2364c954e2c50fe1 diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index 9747b7946512..c97668c4b118 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -57,14 +57,14 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) PyObject *m = NULL; PyObject *ufunc; + import_array(); + import_umath(); + m = PyModule_Create(&moduledef); if (m == NULL) { goto fail; } - import_array(); - import_umath(); - ufunc = PyUFunc_FromFuncAndData(funcs, data, types, 1, 2, 0, PyUFunc_None, "inplace_add", "inplace_add_docstring", 0); @@ -77,7 +77,7 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) ((PyUFuncObject*)ufunc)->iter_flags = NPY_ITER_REDUCE_OK; PyModule_AddObject(m, "inplace_add", (PyObject*)ufunc); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index a95c89b373df..d257bc22d051 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1355,7 +1355,7 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { GCD_LCM_UFUNC(gcd,NPY_INT64,"greatest common denominator of two integers"); GCD_LCM_UFUNC(lcm,NPY_INT64,"least common multiple of two integers"); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index fbdbbb8d2375..020e903b5fc8 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -21,6 +21,7 @@ #include "array_method.h" #include "common.h" #include "numpy/npy_math.h" +#include "npy_sort.h" #include "convert_datatype.h" #include "dtypemeta.h" #include "dispatching.h" @@ -107,7 +108,7 @@ sfloat_getitem(char *data, PyArrayObject *arr) static int -sfloat_setitem(PyObject *obj, char *data, PyArrayObject *arr) +sfloat_setitem(PyArray_Descr *descr_, PyObject *obj, char *data) { if (!PyFloat_CheckExact(obj)) { PyErr_SetString(PyExc_NotImplementedError, @@ -115,7 +116,7 @@ sfloat_setitem(PyObject *obj, char *data, PyArrayObject *arr) return -1; } - PyArray_SFloatDescr *descr = (PyArray_SFloatDescr *)PyArray_DESCR(arr); + PyArray_SFloatDescr *descr = (PyArray_SFloatDescr *)descr_; double value = PyFloat_AsDouble(obj); value /= descr->scaling; @@ -131,9 +132,10 @@ NPY_DType_Slots sfloat_slots = { .default_descr = &sfloat_default_descr, .common_dtype = &sfloat_common_dtype, .common_instance = &sfloat_common_instance, + .setitem = &sfloat_setitem, .f = { .getitem = (PyArray_GetItemFunc *)&sfloat_getitem, - .setitem = (PyArray_SetItemFunc *)&sfloat_setitem, + .setitem = NULL, } }; @@ -775,65 +777,272 @@ 
promote_to_sfloat(PyUFuncObject *NPY_UNUSED(ufunc), } +NPY_NO_EXPORT int +sfloat_stable_sort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(data[0] == data[1]); + assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_STABLE); + + npy_intp N = dimensions[0]; + char *in = data[0]; + + return timsort_double(in, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_default_sort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(data[0] == data[1]); + assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_DEFAULT); + + npy_intp N = dimensions[0]; + char *in = data[0]; + + return quicksort_double(in, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_sort_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + + if (PyDataType_FLAGCHK(context->descriptors[0], NPY_NEEDS_PYAPI)) { + *flags |= NPY_METH_REQUIRES_PYAPI; + } + + if (parameters->flags == NPY_SORT_STABLE) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_stable_sort_loop; + } + else if (parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_default_sort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + + +static NPY_CASTING +sfloat_sort_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + assert(!(given_descrs[1] != given_descrs[0] && given_descrs[1] != NULL)); + assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); + + loop_descrs[0] = given_descrs[0]; + Py_INCREF(loop_descrs[0]); + loop_descrs[1] = loop_descrs[0]; + Py_INCREF(loop_descrs[1]); + + return NPY_NO_CASTING; +} + + +NPY_NO_EXPORT int +sfloat_stable_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_STABLE); + assert(strides[0] == sizeof(npy_float64)); + assert(strides[1] == sizeof(npy_intp)); + + npy_intp N = dimensions[0]; + char *in = data[0]; + npy_intp *out = (npy_intp *)data[1]; + + return atimsort_double(in, out, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_default_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_DEFAULT); + assert(strides[0] == sizeof(npy_float64)); + assert(strides[1] == sizeof(npy_intp)); + + npy_intp N = dimensions[0]; + char *in = data[0]; + npy_intp *out = (npy_intp *)data[1]; + + return aquicksort_double(in, out, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_argsort_get_loop( + PyArrayMethod_Context *context, + int aligned, int 
move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + + if (PyDataType_FLAGCHK(context->descriptors[0], NPY_NEEDS_PYAPI)) { + *flags |= NPY_METH_REQUIRES_PYAPI; + } + + if (parameters->flags == NPY_SORT_STABLE) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_stable_argsort_loop; + } + else if (parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_default_argsort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + + +NPY_NO_EXPORT NPY_CASTING +sfloat_argsort_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *dtypes[2], + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + assert(given_descrs[1] == NULL || given_descrs[1]->type_num == NPY_INTP); + assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); + + loop_descrs[0] = given_descrs[0]; + Py_INCREF(loop_descrs[0]); + loop_descrs[1] = PyArray_DescrFromType(NPY_INTP); + if (loop_descrs[1] == NULL) { + return -1; + } + return NPY_NO_CASTING; +} + + /* * Add new ufunc loops (this is somewhat clumsy as of writing it, but should * get less so with the introduction of public API). */ static int sfloat_init_ufuncs(void) { - PyArray_DTypeMeta *dtypes[3] = { + PyArray_DTypeMeta *all_sfloat_dtypes[3] = { &PyArray_SFloatDType, &PyArray_SFloatDType, &PyArray_SFloatDType}; - PyType_Slot slots[3] = {{0, NULL}}; - PyArrayMethod_Spec spec = { + PyType_Slot multiply_slots[3] = { + {NPY_METH_resolve_descriptors, &multiply_sfloats_resolve_descriptors}, + {NPY_METH_strided_loop, &multiply_sfloats}, + {0, NULL} + }; + PyArrayMethod_Spec multiply_spec = { .nin = 2, - .nout =1, - .dtypes = dtypes, - .slots = slots, + .nout = 1, + .dtypes = all_sfloat_dtypes, + .slots = multiply_slots, + .name = "sfloat_multiply", + .casting = NPY_NO_CASTING, }; - spec.name = "sfloat_multiply"; - spec.casting = NPY_NO_CASTING; - - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &multiply_sfloats_resolve_descriptors; - slots[1].slot = NPY_METH_strided_loop; - slots[1].pfunc = &multiply_sfloats; - PyBoundArrayMethodObject *bmeth = PyArrayMethod_FromSpec_int(&spec, 0); - if (bmeth == NULL) { - return -1; - } - int res = sfloat_add_loop("multiply", - bmeth->dtypes, (PyObject *)bmeth->method); - Py_DECREF(bmeth); - if (res < 0) { - return -1; - } - spec.name = "sfloat_add"; - spec.casting = NPY_SAME_KIND_CASTING; + PyType_Slot add_slots[3] = { + {NPY_METH_resolve_descriptors, &add_sfloats_resolve_descriptors}, + {NPY_METH_strided_loop, &add_sfloats}, + {0, NULL} + }; + PyArrayMethod_Spec add_spec = { + .nin = 2, + .nout = 1, + .dtypes = all_sfloat_dtypes, + .slots = add_slots, + .name = "sfloat_add", + .casting = NPY_SAME_KIND_CASTING, + }; - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &add_sfloats_resolve_descriptors; - slots[1].slot = NPY_METH_strided_loop; - slots[1].pfunc = &add_sfloats; - bmeth = PyArrayMethod_FromSpec_int(&spec, 0); - if (bmeth == NULL) { - return -1; - } - res = sfloat_add_loop("add", - bmeth->dtypes, (PyObject *)bmeth->method); - Py_DECREF(bmeth); - if (res < 0) { + PyArray_DTypeMeta *sort_dtypes[2] = {&PyArray_SFloatDType, &PyArray_SFloatDType}; + PyType_Slot sort_slots[3] = { + {NPY_METH_resolve_descriptors, &sfloat_sort_resolve_descriptors}, 
+ {NPY_METH_get_loop, &sfloat_sort_get_loop}, + {0, NULL} + }; + PyArrayMethod_Spec sort_spec = { + .nin = 1, + .nout = 1, + .dtypes = sort_dtypes, + .slots = sort_slots, + }; + sort_spec.name = "sfloat_sort"; + sort_spec.casting = NPY_NO_CASTING; + sort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + PyArray_DTypeMeta *argsort_dtypes[2] = {&PyArray_SFloatDType, &PyArray_IntpDType}; + PyType_Slot argsort_slots[3] = { + {NPY_METH_resolve_descriptors, &sfloat_argsort_resolve_descriptors}, + {NPY_METH_get_loop, &sfloat_argsort_get_loop}, + {0, NULL} + }; + PyArrayMethod_Spec argsort_spec = { + .nin = 1, + .nout = 1, + .dtypes = argsort_dtypes, + .slots = argsort_slots, + }; + argsort_spec.name = "sfloat_argsort"; + argsort_spec.casting = NPY_NO_CASTING; + argsort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + /* here we chose weirdish names to test the lookup mechanism */ + PyUFunc_LoopSlot loops[] = { + {"multiply", &multiply_spec}, + {"_core._multiarray_umath.add", &add_spec}, + {"numpy:sort", &sort_spec}, + {"numpy._core.fromnumeric:argsort", &argsort_spec}, + {NULL, NULL} + }; + if (PyUFunc_AddLoopsFromSpecs(loops) < 0) { return -1; } /* N.B.: Wrapping isn't actually correct if scaling can be negative */ - if (sfloat_add_wrapping_loop("hypot", dtypes) < 0) { + if (sfloat_add_wrapping_loop("hypot", all_sfloat_dtypes) < 0) { return -1; } /* * Add a promoter for both directions of multiply with double. */ + int res = -1; PyArray_DTypeMeta *double_DType = &PyArray_DoubleDType; PyArray_DTypeMeta *promoter_dtypes[3] = { diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index 8edbdc00b6f3..e85c67f9d903 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -123,15 +123,15 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyArray_Descr *dtype; PyArray_Descr *dtypes[3]; + import_array(); + import_umath(); + m = PyModule_Create(&moduledef); if (m == NULL) { return NULL; } - import_array(); - import_umath(); - add_triplet = PyUFunc_FromFuncAndData(NULL, NULL, NULL, 0, 2, 1, PyUFunc_None, "add_triplet", NULL, 0); @@ -157,7 +157,7 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyDict_SetItemString(d, "add_triplet", add_triplet); Py_DECREF(add_triplet); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index 9f2818d14526..a1b64ecc0444 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -460,6 +460,15 @@ addUfuncs(PyObject *dictionary) { } PyDict_SetItemString(dictionary, "always_error", f); Py_DECREF(f); + f = PyUFunc_FromFuncAndData(always_error_functions, always_error_data, + always_error_signatures, 1, 1, 1, PyUFunc_None, "always_error_unary", + "simply, broken, ufunc that sets an error (but releases the GIL).", + 0); + if (f == NULL) { + return -1; + } + PyDict_SetItemString(dictionary, "always_error_unary", f); + Py_DECREF(f); f = PyUFunc_FromFuncAndDataAndSignature(always_error_functions, always_error_data, always_error_signatures, 1, 2, 1, PyUFunc_None, "always_error_gufunc", @@ -944,7 +953,7 @@ PyMODINIT_FUNC PyInit__umath_tests(void) { // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled 
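// (Editor's annotation, not part of the patch: the repeated "#if" to
// "#ifdef" changes in this diff matter because Py_GIL_DISABLED is only
// #defined, to 1, on free-threaded CPython builds. "#if Py_GIL_DISABLED"
// happens to work when the macro is undefined, since the preprocessor
// substitutes 0, but it triggers -Wundef warnings; "#ifdef" tests
// definedness alone and is the conventional guard.)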
PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/clip.cpp b/numpy/_core/src/umath/clip.cpp index e051692c6d48..127b019ef8ae 100644 --- a/numpy/_core/src/umath/clip.cpp +++ b/numpy/_core/src/umath/clip.cpp @@ -1,7 +1,6 @@ /** * This module provides the inner loops for the clip ufunc */ -#include #define _UMATHMODULE #define _MULTIARRAYMODULE @@ -10,6 +9,7 @@ #define PY_SSIZE_T_CLEAN #include +#include #include "numpy/halffloat.h" #include "numpy/ndarraytypes.h" #include "numpy/npy_common.h" diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index ba98a9b5c5d1..db5698d8a819 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -47,6 +47,7 @@ #include "numpy/ndarraytypes.h" #include "numpy/npy_3kcompat.h" +#include "npy_import.h" #include "common.h" #include "npy_pycompat.h" @@ -193,6 +194,75 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) } +/*UFUNC_API + * Add multiple loops to ufuncs from ArrayMethod specs. This also + * handles the registration of sort and argsort methods for dtypes + * from ArrayMethod specs. + */ +NPY_NO_EXPORT int +PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) +{ + if (npy_cache_import_runtime( + "numpy", "sort", &npy_runtime_imports.sort) < 0) { + return -1; + } + if (npy_cache_import_runtime( + "numpy", "argsort", &npy_runtime_imports.argsort) < 0) { + return -1; + } + + PyUFunc_LoopSlot *slot; + for (slot = slots; slot->name != NULL; slot++) { + PyObject *ufunc = npy_import_entry_point(slot->name); + if (ufunc == NULL) { + return -1; + } + + if (ufunc == npy_runtime_imports.sort) { + Py_DECREF(ufunc); + + PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; + PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); + if (sort_meth == NULL) { + return -1; + } + + NPY_DT_SLOTS(dtype)->sort_meth = sort_meth->method; + Py_INCREF(sort_meth->method); + Py_DECREF(sort_meth); + } + else if (ufunc == npy_runtime_imports.argsort) { + Py_DECREF(ufunc); + + PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; + PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); + if (argsort_meth == NULL) { + return -1; + } + + NPY_DT_SLOTS(dtype)->argsort_meth = argsort_meth->method; + Py_INCREF(argsort_meth->method); + Py_DECREF(argsort_meth); + } + else { + if (!PyObject_TypeCheck(ufunc, &PyUFunc_Type)) { + PyErr_Format(PyExc_TypeError, "%s was not a ufunc!", slot->name); + Py_DECREF(ufunc); + return -1; + } + + int ret = PyUFunc_AddLoopFromSpec_int(ufunc, slot->spec, 0); + Py_DECREF(ufunc); + if (ret < 0) { + return -1; + } + } + } + + return 0; +} + + /** * Resolves the implementation to use, this uses typical multiple dispatching * methods of finding the best matching implementation or resolver. 
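(Editor's sketch, not part of the patch: the slot-array calling convention
for PyUFunc_AddLoopsFromSpecs() added above, mirroring its use in
_scaled_float_dtype.c earlier in this diff. example_register_loops,
multiply_spec, and sort_spec are illustrative names only; the specs are
assumed to be fully populated PyArrayMethod_Spec values.)

    static int
    example_register_loops(void)
    {
        PyUFunc_LoopSlot loops[] = {
            /* a plain name is resolved as a ufunc lookup */
            {"multiply", &multiply_spec},
            /* "module:attribute" entry points are imported; numpy:sort and
             * numpy:argsort are special-cased to register the dtype's
             * sort/argsort ArrayMethods instead of a ufunc loop */
            {"numpy:sort", &sort_spec},
            {NULL, NULL},  /* a NULL name terminates the slot list */
        };
        return PyUFunc_AddLoopsFromSpecs(loops);  /* 0 on success, -1 on error */
    }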
@@ -912,13 +982,9 @@ promote_and_get_info_and_ufuncimpl_with_locking( npy_bool legacy_promotion_is_possible) { std::shared_mutex *mutex = ((std::shared_mutex *)((PyArrayIdentityHash *)ufunc->_dispatch_cache)->mutex); - NPY_BEGIN_ALLOW_THREADS - mutex->lock_shared(); - NPY_END_ALLOW_THREADS - PyObject *info = PyArrayIdentityHash_GetItem( + PyObject *info = PyArrayIdentityHash_GetItemWithLock( (PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); - mutex->unlock_shared(); if (info != NULL && PyObject_TypeCheck( PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { @@ -1027,6 +1093,13 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } + /* + * We hold the GIL here, so on the GIL-enabled build the GIL prevents + * races to fill the promotion cache. + * + * On the free-threaded build we need to set up our own locking to prevent + * races to fill the promotion cache. + */ #ifdef Py_GIL_DISABLED PyObject *info = promote_and_get_info_and_ufuncimpl_with_locking(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index 95bcb32bf0ce..7ca8bd7a1598 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -16,6 +16,9 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate); NPY_NO_EXPORT int PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv); +NPY_NO_EXPORT int +PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots); + NPY_NO_EXPORT PyArrayMethodObject * promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 755d8665b11d..91b0b4c62d30 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -398,7 +398,7 @@ _error_handler(const char *name, int method, PyObject *pyfunc, char *errtype, switch(method) { case UFUNC_ERR_WARN: PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name); - if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) { + if (PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1) < 0) { goto fail; } break; diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index 5143f414606e..42c2c9d8d04f 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -16,7 +16,7 @@ /* * largest simd vector size in bytes numpy supports - * it is currently a extremely large value as it is only used for memory + * it is currently an extremely large value as it is only used for memory * overlap checks */ #if NPY_SIMD > 0 diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 705262fedd38..7a85937fcc8f 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -439,11 +439,10 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, descrs[i] = bound_res->dtypes[i]->singleton; } - PyArrayMethod_Context context = { - (PyObject *)ufunc, - bound_res->method, - descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = bound_res->method; int ret = get_initial_from_ufunc(&context, 0, context.method->legacy_initial); diff --git a/numpy/_core/src/umath/loops_autovec.dispatch.c.src b/numpy/_core/src/umath/loops_autovec.dispatch.c.src index 983fa1b5eb80..d14b0fba93a3 100644 --- 
a/numpy/_core/src/umath/loops_autovec.dispatch.c.src +++ b/numpy/_core/src/umath/loops_autovec.dispatch.c.src @@ -24,6 +24,8 @@ */ #define INT_left_shift_needs_clear_floatstatus #define UINT_left_shift_needs_clear_floatstatus +#define LONG_left_shift_needs_clear_floatstatus +#define ULONG_left_shift_needs_clear_floatstatus /**begin repeat * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, @@ -51,7 +53,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_square) NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_reciprocal) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { - UNARY_LOOP_FAST(@type@, @type@, *out = 1.0 / in); + UNARY_LOOP_FAST(@type@, @type@, *out = (@type@)(1.0 / in)); } /**begin repeat1 diff --git a/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src index 8c66229942ee..93d288fbdb2e 100755 --- a/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src +++ b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src @@ -385,7 +385,7 @@ simd_tanh_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_ vec_f64 b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16; if constexpr(hn::MaxLanes(f64) == 2){ vec_f64 e0e1_0, e0e1_1; - uint64_t index[hn::Lanes(f64)]; + uint64_t index[hn::MaxLanes(f64)]; hn::StoreU(idx, u64, index); /**begin repeat diff --git a/numpy/_core/src/umath/loops_logical.dispatch.cpp b/numpy/_core/src/umath/loops_logical.dispatch.cpp index ec17f90154c8..5c1834cc29e2 100644 --- a/numpy/_core/src/umath/loops_logical.dispatch.cpp +++ b/numpy/_core/src/umath/loops_logical.dispatch.cpp @@ -4,17 +4,16 @@ #include "lowlevel_strided_loops.h" #include "fast_loop_macros.h" #include - +#include "simd/simd.hpp" #include -namespace hn = hwy::HWY_NAMESPACE; struct logical_and_t {}; struct logical_or_t {}; struct absolute_t {}; struct logical_not_t {}; -const hn::ScalableTag u8; -using vec_u8 = hn::Vec; +namespace { +using namespace np::simd; /******************************************************************************* ** Defining the SIMD kernels @@ -24,86 +23,84 @@ using vec_u8 = hn::Vec; * consistent, should not be required if bool is used correctly everywhere but * you never know */ - -HWY_INLINE HWY_ATTR vec_u8 byte_to_true(vec_u8 v) +#if NPY_HWY +HWY_INLINE HWY_ATTR Vec byte_to_true(Vec v) { - return hn::IfThenZeroElse(hn::Eq(v, hn::Zero(u8)), hn::Set(u8, 1)); + return hn::IfThenZeroElse(hn::Eq(v, Zero()), Set(uint8_t(1))); } + /* * convert mask vector (0xff/0x00) to boolean true. similar to byte_to_true(), * but we've already got a mask and can skip negation. */ -HWY_INLINE HWY_ATTR vec_u8 mask_to_true(vec_u8 v) +HWY_INLINE HWY_ATTR Vec mask_to_true(Vec v) { - const vec_u8 truemask = hn::Set(u8, 1 == 1); - return hn::And(truemask, v); + return hn::IfThenElseZero(hn::Ne(v, Zero()), Set(uint8_t(1))); } + /* * For logical_and, we have to be careful to handle non-bool inputs where * bits of each operand might not overlap. Example: a = 0x01, b = 0x80 * Both evaluate to boolean true, however, a & b is false. Return value * should be consistent with byte_to_true(). 
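* (Editor's note, not part of the patch: concretely, 0x01 & 0x80 == 0 even
* though both bytes are logically true, whereas hn::Min(0x01, 0x80) == 0x01
* != 0. Since min(a, b) == 0 exactly when at least one operand is false,
* simd_logical_and_u8() below compares the minimum against zero rather
* than using a bitwise AND.)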
*/ -HWY_INLINE HWY_ATTR vec_u8 simd_logical_and_u8(vec_u8 a, vec_u8 b) +HWY_INLINE HWY_ATTR Vec simd_logical_and_u8(Vec a, Vec b) { return hn::IfThenZeroElse( - hn::Eq(hn::Zero(u8), hn::Min(a, b)), - hn::Set(u8, 1) + hn::Eq(Zero(), hn::Min(a, b)), + Set(uint8_t(1)) ); } /* * We don't really need the following, but it simplifies the templating code * below since it is paired with simd_logical_and_u8() above. */ -HWY_INLINE HWY_ATTR vec_u8 simd_logical_or_u8(vec_u8 a, vec_u8 b) +HWY_INLINE HWY_ATTR Vec simd_logical_or_u8(Vec a, Vec b) { - vec_u8 r = hn::Or(a, b); + auto r = hn::Or(a, b); return byte_to_true(r); } -HWY_INLINE HWY_ATTR npy_bool simd_any_u8(vec_u8 v) +HWY_INLINE HWY_ATTR bool simd_any_u8(Vec v) { - return hn::ReduceMax(u8, v) != 0; + return hn::ReduceMax(_Tag(), v) != 0; } -HWY_INLINE HWY_ATTR npy_bool simd_all_u8(vec_u8 v) +HWY_INLINE HWY_ATTR bool simd_all_u8(Vec v) { - return hn::ReduceMin(u8, v) != 0; + return hn::ReduceMin(_Tag(), v) != 0; } +#endif template struct BinaryLogicalTraits; template<> struct BinaryLogicalTraits { - static constexpr bool is_and = false; - static constexpr auto scalar_op = std::logical_or{}; + static constexpr bool is_and = false; + static constexpr auto scalar_op = std::logical_or{}; static constexpr auto scalar_cmp = std::not_equal_to{}; +#if NPY_HWY static constexpr auto anyall = simd_any_u8; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 a, vec_u8 b) { - return simd_logical_or_u8(a, b); - } - - HWY_INLINE HWY_ATTR vec_u8 reduce(vec_u8 a, vec_u8 b) { + static HWY_INLINE HWY_ATTR Vec simd_op(Vec a, Vec b) { return simd_logical_or_u8(a, b); } +#endif }; template<> struct BinaryLogicalTraits { - static constexpr bool is_and = true; - static constexpr auto scalar_op = std::logical_and{}; + static constexpr bool is_and = true; + static constexpr auto scalar_op = std::logical_and{}; static constexpr auto scalar_cmp = std::equal_to{}; +#if NPY_HWY static constexpr auto anyall = simd_all_u8; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 a, vec_u8 b) { - return simd_logical_and_u8(a, b); - } - - HWY_INLINE HWY_ATTR vec_u8 reduce(vec_u8 a, vec_u8 b) { + static HWY_INLINE HWY_ATTR Vec simd_op(Vec a, Vec b) { return simd_logical_and_u8(a, b); } +#endif }; template @@ -111,52 +108,52 @@ struct UnaryLogicalTraits; template<> struct UnaryLogicalTraits { - static constexpr bool is_not = true; static constexpr auto scalar_op = std::equal_to{}; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 v) { - const vec_u8 zero = hn::Zero(u8); - return mask_to_true(hn::VecFromMask(u8, hn::Eq(v, zero))); +#if NPY_HWY + static HWY_INLINE HWY_ATTR Vec simd_op(Vec v) { + const auto zero = Zero(); + return mask_to_true(hn::VecFromMask(_Tag(), hn::Eq(v, zero))); } +#endif }; template<> struct UnaryLogicalTraits { - static constexpr bool is_not = false; static constexpr auto scalar_op = std::not_equal_to{}; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 v) { +#if NPY_HWY + static HWY_INLINE HWY_ATTR Vec simd_op(Vec v) { return byte_to_true(v); } +#endif }; - +#if NPY_HWY template HWY_ATTR SIMD_MSVC_NOINLINE static void simd_binary_logical_BOOL(npy_bool* op, npy_bool* ip1, npy_bool* ip2, npy_intp len) { using Traits = BinaryLogicalTraits; - Traits traits; constexpr int UNROLL = 16; - const int vstep = hn::Lanes(u8); + HWY_LANES_CONSTEXPR int vstep = Lanes(); const int wstep = vstep * UNROLL; // Unrolled vectors loop for (; len >= wstep; len -= wstep, ip1 += wstep, ip2 += wstep, op += wstep) { - for(int i = 0; i < UNROLL; i++) { - vec_u8 a = hn::LoadU(u8, ip1 + vstep * i); - vec_u8 b = 
hn::LoadU(u8, ip2 + vstep * i); - vec_u8 r = traits.simd_op(a, b); - hn::StoreU(r, u8, op + vstep * i); + auto a = LoadU(ip1 + vstep * i); + auto b = LoadU(ip2 + vstep * i); + auto r = Traits::simd_op(a, b); + StoreU(r, op + vstep * i); } } // Single vectors loop for (; len >= vstep; len -= vstep, ip1 += vstep, ip2 += vstep, op += vstep) { - vec_u8 a = hn::LoadU(u8, ip1); - vec_u8 b = hn::LoadU(u8, ip2); - vec_u8 r = traits.simd_op(a, b); - hn::StoreU(r, u8, op); + auto a = LoadU(ip1); + auto b = LoadU(ip2); + auto r = Traits::simd_op(a, b); + StoreU(r, op); } // Scalar loop to finish off @@ -169,9 +166,8 @@ template HWY_ATTR SIMD_MSVC_NOINLINE static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { using Traits = BinaryLogicalTraits; - Traits traits; constexpr int UNROLL = 8; - const int vstep = hn::Lanes(u8); + HWY_LANES_CONSTEXPR int vstep = Lanes(); const int wstep = vstep * UNROLL; // Unrolled vectors loop @@ -179,24 +175,24 @@ static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { #if defined(NPY_HAVE_SSE2) NPY_PREFETCH(reinterpret_cast(ip + wstep), 0, 3); #endif - vec_u8 v0 = hn::LoadU(u8, ip); - vec_u8 v1 = hn::LoadU(u8, ip + vstep); - vec_u8 v2 = hn::LoadU(u8, ip + vstep * 2); - vec_u8 v3 = hn::LoadU(u8, ip + vstep * 3); - vec_u8 v4 = hn::LoadU(u8, ip + vstep * 4); - vec_u8 v5 = hn::LoadU(u8, ip + vstep * 5); - vec_u8 v6 = hn::LoadU(u8, ip + vstep * 6); - vec_u8 v7 = hn::LoadU(u8, ip + vstep * 7); + auto v0 = LoadU(ip); + auto v1 = LoadU(ip + vstep); + auto v2 = LoadU(ip + vstep * 2); + auto v3 = LoadU(ip + vstep * 3); + auto v4 = LoadU(ip + vstep * 4); + auto v5 = LoadU(ip + vstep * 5); + auto v6 = LoadU(ip + vstep * 6); + auto v7 = LoadU(ip + vstep * 7); - vec_u8 m01 = traits.reduce(v0, v1); - vec_u8 m23 = traits.reduce(v2, v3); - vec_u8 m45 = traits.reduce(v4, v5); - vec_u8 m67 = traits.reduce(v6, v7); + auto m01 = Traits::simd_op(v0, v1); + auto m23 = Traits::simd_op(v2, v3); + auto m45 = Traits::simd_op(v4, v5); + auto m67 = Traits::simd_op(v6, v7); - vec_u8 m0123 = traits.reduce(m01, m23); - vec_u8 m4567 = traits.reduce(m45, m67); + auto m0123 = Traits::simd_op(m01, m23); + auto m4567 = Traits::simd_op(m45, m67); - vec_u8 mv = traits.reduce(m0123, m4567); + auto mv = Traits::simd_op(m0123, m4567); if(Traits::anyall(mv) == !Traits::is_and) { *op = !Traits::is_and; @@ -206,7 +202,7 @@ static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { // Single vectors loop for (; len >= vstep; len -= vstep, ip += vstep) { - vec_u8 v = hn::LoadU(u8, ip); + auto v = LoadU(ip); if(Traits::anyall(v) == !Traits::is_and) { *op = !Traits::is_and; return; @@ -226,25 +222,24 @@ template HWY_ATTR SIMD_MSVC_NOINLINE static void simd_unary_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { using Traits = UnaryLogicalTraits; - Traits traits; constexpr int UNROLL = 16; - const int vstep = hn::Lanes(u8); + HWY_LANES_CONSTEXPR int vstep = Lanes(); const int wstep = vstep * UNROLL; // Unrolled vectors loop for (; len >= wstep; len -= wstep, ip += wstep, op += wstep) { for(int i = 0; i < UNROLL; i++) { - vec_u8 v = hn::LoadU(u8, ip + vstep * i); - vec_u8 r = traits.simd_op(v); - hn::StoreU(r, u8, op + vstep * i); + auto v = LoadU(ip + vstep * i); + auto r = Traits::simd_op(v); + StoreU(r, op + vstep * i); } } // Single vectors loop for (; len >= vstep; len -= vstep, ip += vstep, op += vstep) { - vec_u8 v = hn::LoadU(u8, ip); - vec_u8 r = traits.simd_op(v); - hn::StoreU(r, u8, op); + auto v = LoadU(ip); + auto r = 
Traits::simd_op(v); + StoreU(r, op); } // Scalar loop to finish off @@ -253,6 +248,9 @@ static void simd_unary_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { } } +#endif //NPY_HWY +} // namespace anonymous + /******************************************************************************* ** Defining ufunc inner functions ******************************************************************************/ @@ -260,12 +258,9 @@ template static NPY_INLINE int run_binary_simd_logical_BOOL( char** args, npy_intp const* dimensions, npy_intp const* steps) { -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_BINARY(sizeof(npy_bool), NPY_SIMD_WIDTH)) { - simd_binary_logical_BOOL((npy_bool*)args[2], (npy_bool*)args[0], - (npy_bool*)args[1], dimensions[0] - ); +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_BINARY(sizeof(npy_bool), kMaxLanes)) { + simd_binary_logical_BOOL((npy_bool*)args[2], (npy_bool*)args[0], (npy_bool*)args[1], dimensions[0]); return 1; } #endif @@ -276,12 +271,9 @@ template static NPY_INLINE int run_reduce_simd_logical_BOOL( char** args, npy_intp const* dimensions, npy_intp const* steps) { -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_REDUCE(sizeof(npy_bool), NPY_SIMD_WIDTH)) { - simd_reduce_logical_BOOL((npy_bool*)args[0], (npy_bool*)args[1], - dimensions[0] - ); +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_REDUCE(sizeof(npy_bool), kMaxLanes)) { + simd_reduce_logical_BOOL((npy_bool*)args[0], (npy_bool*)args[1], dimensions[0]); return 1; } #endif @@ -292,9 +284,8 @@ template static NPY_INLINE int run_unary_simd_logical_BOOL( char** args, npy_intp const* dimensions, npy_intp const* steps) { -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_UNARY(sizeof(npy_bool), NPY_SIMD_WIDTH)) { +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_UNARY(sizeof(npy_bool), kMaxLanes)) { simd_unary_logical_BOOL((npy_bool*)args[1], (npy_bool*)args[0], dimensions[0]); return 1; } @@ -304,24 +295,34 @@ static NPY_INLINE int run_unary_simd_logical_BOOL( template void BOOL_binary_func_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *ip1 = args[0], *ip2 = args[1], *op1 = args[2]; + npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2]; + npy_intp n = dimensions[0]; using Traits = BinaryLogicalTraits; - + +#if NPY_HWY if (run_binary_simd_logical_BOOL(args, dimensions, steps)) { return; } - else { - BINARY_LOOP { - const npy_bool in1 = *(npy_bool*)ip1; - const npy_bool in2 = *(npy_bool*)ip2; - *((npy_bool*)op1) = Traits::scalar_op(in1, in2); - } +#endif + + for(npy_intp i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) { + const npy_bool in1 = *(npy_bool*)ip1; + const npy_bool in2 = *(npy_bool*)ip2; + *((npy_bool*)op1) = Traits::scalar_op(in1, in2); } } template void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *iop1 = args[0]; + npy_bool io1 = *(npy_bool *)iop1; + char *ip2 = args[1]; + npy_intp is2 = steps[1]; + npy_intp n = dimensions[0]; + npy_intp i; using Traits = BinaryLogicalTraits; -#if NPY_SIMD +#if NPY_HWY if (run_reduce_simd_logical_BOOL(args, dimensions, steps)) { return; } @@ -343,7 +344,6 @@ void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_int * with glibc >= 2.12 and memchr can only check for equal 1 */ static const npy_bool zero[4096]={0}; /* zero by C standard */ - npy_uintp i, n = dimensions[0]; for (i = 0; !*op && i < n - (n % sizeof(zero)); i += sizeof(zero)) { *op = memcmp(&args[1][i], zero, 
sizeof(zero)) != 0; @@ -355,14 +355,14 @@ void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_int return; } #endif - else { - BINARY_REDUCE_LOOP(npy_bool) { - const npy_bool in2 = *(npy_bool*)ip2; - io1 = Traits::scalar_op(io1, in2); - if ((Traits::is_and && !io1) || (!Traits::is_and && io1)) break; - } - *((npy_bool*)iop1) = io1; + + for(i = 0; i < n; i++, ip2 += is2) { + const npy_bool in2 = *(npy_bool*)ip2; + io1 = Traits::scalar_op(io1, in2); + if ((Traits::is_and && !io1) || (!Traits::is_and && io1)) + break; } + *((npy_bool*)iop1) = io1; } template @@ -390,15 +390,18 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_logical_or)( template void BOOL_func_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *ip1 = args[0], *op1 = args[1]; + npy_intp is1 = steps[0], os1 = steps[1]; + npy_intp n = dimensions[0]; using Traits = UnaryLogicalTraits; + if (run_unary_simd_logical_BOOL(args, dimensions, steps)) { return; } - else { - UNARY_LOOP { - npy_bool in1 = *(npy_bool*)ip1; - *((npy_bool*)op1) = Traits::scalar_op(in1, 0); - } + + for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { + npy_bool in1 = *(npy_bool*)ip1; + *((npy_bool*)op1) = Traits::scalar_op(in1, 0); } } diff --git a/numpy/_core/src/umath/loops_minmax.dispatch.c.src b/numpy/_core/src/umath/loops_minmax.dispatch.c.src index c11f391f9159..a33297ca83d5 100644 --- a/numpy/_core/src/umath/loops_minmax.dispatch.c.src +++ b/numpy/_core/src/umath/loops_minmax.dispatch.c.src @@ -123,7 +123,7 @@ simd_reduce_c_@intrin@_@sfx@(const npyv_lanetype_@sfx@ *ip, npyv_lanetype_@sfx@ npyv_@sfx@ acc = npyv_setall_@sfx@(op1[0]); for (; len >= wstep; len -= wstep, ip += wstep) { #ifdef NPY_HAVE_SSE2 - NPY_PREFETCH(ip + wstep, 0, 3); + NPY_PREFETCH((const char*)(ip + wstep), 0, 3); #endif npyv_@sfx@ v0 = npyv_load_@sfx@(ip + vstep * 0); npyv_@sfx@ v1 = npyv_load_@sfx@(ip + vstep * 1); diff --git a/numpy/_core/src/umath/loops_modulo.dispatch.c.src b/numpy/_core/src/umath/loops_modulo.dispatch.c.src index 032cc3344060..4645fe14a487 100644 --- a/numpy/_core/src/umath/loops_modulo.dispatch.c.src +++ b/numpy/_core/src/umath/loops_modulo.dispatch.c.src @@ -490,12 +490,16 @@ vsx4_simd_@func@_by_scalar_contig_@sfx@(char **args, npy_intp len) #else /* fmod and remainder */ for (; len > 0; --len, ++src1, ++dst1) { const npyv_lanetype_@sfx@ a = *src1; - *dst1 = a % scalar; + if (NPY_UNLIKELY(a == NPY_MIN_INT@len@ && scalar == -1)) { + *dst1 = 0; + } else { + *dst1 = a % scalar; #if @id@ == 1 /* remainder */ - if (!((a > 0) == (scalar > 0) || *dst1 == 0)) { - *dst1 += scalar; - } + if (!((a > 0) == (scalar > 0) || *dst1 == 0)) { + *dst1 += scalar; + } #endif + } } #endif npyv_cleanup(); diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index ae696db4cd4a..d298a8596cc4 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -3,7 +3,9 @@ #include "loops_utils.h" #include "simd/simd.h" +#include "simd/simd.hpp" #include + namespace hn = hwy::HWY_NAMESPACE; /* @@ -184,7 +186,7 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, "larger than 256 bits."); simd_maski = ((uint8_t *)&simd_maski)[0]; #endif - float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::Lanes(f32)]; + float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::MaxLanes(f32)]; hn::Store(x_in, f32, ip_fback); // process elements using libc for large elements diff --git 
a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index d9be7b1d6826..11e014acec7f 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -16,6 +16,7 @@ +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" /* For TYPE_dot functions */ @@ -27,6 +28,8 @@ ***************************************************************************** */ +#define ABS(x) ((x) < 0 ? -(x) : (x)) + #if defined(HAVE_CBLAS) /* * -1 to be conservative, in case blas internally uses a for loop with an @@ -120,7 +123,7 @@ static inline void } } -NPY_NO_EXPORT void +static void @name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n, void *ip2, npy_intp is2_n, void *op, npy_intp op_m, @@ -156,7 +159,7 @@ NPY_NO_EXPORT void is2_n / sizeof(@typ@), @step0@, op, op_m / sizeof(@typ@)); } -NPY_NO_EXPORT void +static void @name@_matmul_matrixmatrix(void *ip1, npy_intp is1_m, npy_intp is1_n, void *ip2, npy_intp is2_n, npy_intp is2_p, void *op, npy_intp os_m, npy_intp os_p, @@ -260,7 +263,7 @@ NPY_NO_EXPORT void * #IS_HALF = 0, 0, 0, 1, 0*13# */ -NPY_NO_EXPORT void +static void @TYPE@_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -318,7 +321,7 @@ NPY_NO_EXPORT void } /**end repeat**/ -NPY_NO_EXPORT void +static void BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -357,7 +360,7 @@ BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, } } -NPY_NO_EXPORT void +static void OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -554,9 +557,9 @@ NPY_NO_EXPORT void } else { /* matrix @ matrix * copy if not blasable, see gh-12365 & gh-23588 */ - npy_bool i1_transpose = is1_m < is1_n, - i2_transpose = is2_n < is2_p, - o_transpose = os_m < os_p; + npy_bool i1_transpose = ABS(is1_m) < ABS(is1_n), + i2_transpose = ABS(is2_n) < ABS(is2_p), + o_transpose = ABS(os_m) < ABS(os_p); npy_intp tmp_is1_m = i1_transpose ? sz : sz*dn, tmp_is1_n = i1_transpose ? 
sz*dm : sz, @@ -596,7 +599,7 @@ NPY_NO_EXPORT void * Use transpose equivalence: * matmul(a, b, o) == matmul(b.T, a.T, o.T) */ - if (o_f_blasable) { + if (o_transpose) { @TYPE@_matmul_matrixmatrix( ip2_, is2_p_, is2_n_, ip1_, is1_n_, is1_m_, @@ -629,6 +632,11 @@ NPY_NO_EXPORT void #endif } #if @USEBLAS@ && defined(HAVE_CBLAS) +#if NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif if (allocate_buffer) free(tmp_ip12op); #endif } @@ -653,7 +661,7 @@ NPY_NO_EXPORT void * #prefix = c, z, 0# * #USE_BLAS = 1, 1, 0# */ -NPY_NO_EXPORT void +static void @name@_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, void *NPY_UNUSED(ignore)) { @@ -749,6 +757,7 @@ OBJECT_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp * CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT# * #DOT = dot*15, dotc*4# * #CHECK_PYERR = 0*18, 1# + * #CHECK_BLAS = 1*2, 0*13, 1*2, 0*2# */ NPY_NO_EXPORT void @TYPE@_vecdot(char **args, npy_intp const *dimensions, npy_intp const *steps, @@ -772,6 +781,11 @@ NPY_NO_EXPORT void } #endif } +#if @CHECK_BLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ @@ -787,7 +801,7 @@ NPY_NO_EXPORT void * #step1 = &oneF, &oneD# * #step0 = &zeroF, &zeroD# */ -NPY_NO_EXPORT void +static void @name@_vecmat_via_gemm(void *ip1, npy_intp is1_n, void *ip2, npy_intp is2_n, npy_intp is2_m, void *op, npy_intp os_m, @@ -878,6 +892,11 @@ NPY_NO_EXPORT void #endif } } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ @@ -943,5 +962,10 @@ NPY_NO_EXPORT void #endif } } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index b376b94936bc..384ac052b226 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -372,7 +372,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, PyArray_NDIM(result), PyArray_DIMS(result), PyArray_DESCR(result), PyArray_BYTES(result), PyArray_STRIDES(result), - op_dtypes[0], initial_buf); + op_dtypes[0], initial_buf, NPY_UNSAFE_CASTING); if (ret < 0) { goto fail; } diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index a565eee8f939..e2d7c22f5deb 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -826,6 +826,10 @@ typedef enum { * npy_long, npy_ulong, npy_longlong, npy_ulonglong, * npy_half, npy_float, npy_double, npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# + * #scalar_type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong, + * npy_half, npy_float, npy_double, npy_longdouble, + * npy_float, npy_double, npy_longdouble# * #c = x*14, f, , l# */ @@ -846,10 +850,10 @@ typedef enum { *result = npy_float_to_half((float)(value)) #elif defined(IS_CFLOAT) || defined(IS_CDOUBLE) || defined(IS_CLONGDOUBLE) #define CONVERT_TO_RESULT(value) \ - npy_csetreal@c@(result, value); \ + npy_csetreal@c@(result, ((@scalar_type@)(value))); \ npy_csetimag@c@(result, 0) #else - #define CONVERT_TO_RESULT(value) *result = value + #define CONVERT_TO_RESULT(value) *result = ((@type@)(value)) #endif @@ -1242,7 +1246,7 @@ 
static PyObject * */ return PyGenericArrType_Type.tp_as_number->nb_@oper@(a,b); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&other_val, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1386,14 +1390,14 @@ static PyObject * case DEFER_TO_OTHER_KNOWN_SCALAR: Py_RETURN_NOTIMPLEMENTED; case CONVERSION_SUCCESS: - other_val = other_val_conv; /* Need a float value */ + other_val = (double)other_val_conv; /* Need a float value */ break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_as_number->nb_true_divide(a,b); case CONVERT_PYSCALAR: /* This is the special behavior, convert to float64 directly */ - if (DOUBLE_setitem(other, (char *)&other_val, NULL) < 0) { + if (DOUBLE_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1405,12 +1409,12 @@ static PyObject * npy_clear_floatstatus_barrier((char*)&arg1); if (is_forward) { - arg1 = PyArrayScalar_VAL(a, @Name@); + arg1 = (double)PyArrayScalar_VAL(a, @Name@); arg2 = other_val; } else { arg1 = other_val; - arg2 = PyArrayScalar_VAL(b, @Name@); + arg2 = (double)PyArrayScalar_VAL(b, @Name@); } /* Note that arguments are already float64, so we can just divide */ @@ -1512,7 +1516,7 @@ static PyObject * case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_as_number->nb_power(a, b, modulo); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&other_val, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1831,7 +1835,7 @@ static PyObject * } return @func@(@to_ctype@(npy_creal@n@(PyArrayScalar_VAL(obj, @Name@)))); #else - return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@))); + return @func@((double)(@to_ctype@(PyArrayScalar_VAL(obj, @Name@)))); #endif } /**end repeat**/ @@ -1920,7 +1924,7 @@ static PyObject* case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_richcompare(self, other, cmp_op); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&arg2, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&arg2) < 0) { return NULL; } break; diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 554f9ece5197..1e7bea49a365 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -17,6 +17,12 @@ #define CHECK_OVERFLOW(index) if (buf + (index) >= after) return 0 #define MSB(val) ((val) >> 7 & 1) +#ifdef _MSC_VER +// MSVC sometimes complains (C4715: "not all control paths return a value") +// on switch statements over enum classes, even though all enum values are covered. +// This warning is suppressed here to avoid invasive changes. 
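// (Editor's annotation, not part of the patch: a function like
// buffer_width() below shows the pattern that trips C4715. The switch
// covers every ENCODING enumerator and each case returns, yet MSVC still
// assumes control can reach the end of the function without a return
// value, hence the file-wide suppression.)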
+# pragma warning(disable:4715) +#endif enum class ENCODING { ASCII, UTF32, UTF8 @@ -297,6 +303,18 @@ struct Buffer { return num_codepoints; } + inline size_t + buffer_width() + { + switch (enc) { + case ENCODING::ASCII: + case ENCODING::UTF8: + return after - buf; + case ENCODING::UTF32: + return (after - buf) / sizeof(npy_ucs4); + } + } + inline Buffer& operator+=(npy_int64 rhs) { diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 5b4b67cda625..9b3d86c25301 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -15,6 +15,7 @@ #include "dtypemeta.h" #include "convert_datatype.h" #include "gil_utils.h" +#include "templ_common.h" /* for npy_mul_size_with_overflow_size_t */ #include "string_ufuncs.h" #include "string_fastsearch.h" @@ -166,26 +167,44 @@ string_add(Buffer buf1, Buffer buf2, Buffer out) template -static inline void +static inline int string_multiply(Buffer buf1, npy_int64 reps, Buffer out) { size_t len1 = buf1.num_codepoints(); if (reps < 1 || len1 == 0) { out.buffer_fill_with_zeros_after_index(0); - return; + return 0; } if (len1 == 1) { out.buffer_memset(*buf1, reps); out.buffer_fill_with_zeros_after_index(reps); + return 0; } - else { - for (npy_int64 i = 0; i < reps; i++) { - buf1.buffer_memcpy(out, len1); - out += len1; - } - out.buffer_fill_with_zeros_after_index(0); + + size_t newlen; + if (NPY_UNLIKELY(npy_mul_with_overflow_size_t(&newlen, reps, len1) != 0) || newlen > PY_SSIZE_T_MAX) { + return -1; + } + + size_t pad = 0; + size_t width = out.buffer_width(); + if (width < newlen) { + reps = width / len1; + pad = width % len1; } + + for (npy_int64 i = 0; i < reps; i++) { + buf1.buffer_memcpy(out, len1); + out += len1; + } + + buf1.buffer_memcpy(out, pad); + out += pad; + + out.buffer_fill_with_zeros_after_index(0); + + return 0; } @@ -238,7 +257,9 @@ string_multiply_strint_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in1, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in2, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in2, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -267,7 +288,9 @@ string_multiply_intstr_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in2, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in1, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in1, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -752,10 +775,11 @@ string_multiply_resolve_descriptors( if (given_descrs[2] == NULL) { PyErr_SetString( PyExc_TypeError, - "The 'out' kwarg is necessary. Use numpy.strings.multiply without it."); + "The 'out' kwarg is necessary when using the string multiply ufunc " + "directly. 
Use numpy.strings.multiply to multiply strings without " + "specifying 'out'."); return _NPY_ERROR_OCCURRED_IN_CAST; } - loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; @@ -917,7 +941,7 @@ string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { - Py_INCREF(op_dtypes[0]); + Py_XINCREF(op_dtypes[0]); new_op_dtypes[0] = op_dtypes[0]; new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); new_op_dtypes[2] = PyArray_DTypeFromTypeNum(NPY_DEFAULT_INT); @@ -1521,7 +1545,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = NPY_OBJECT; dtypes[1] = NPY_BOOL; - const char *unary_buffer_method_names[] = { + const char *const unary_buffer_method_names[] = { "isalpha", "isalnum", "isdigit", "isspace", "islower", "isupper", "istitle", "isdecimal", "isnumeric", }; @@ -1635,7 +1659,7 @@ init_string_ufuncs(PyObject *umath) dtypes[2] = dtypes[3] = NPY_INT64; dtypes[4] = NPY_BOOL; - const char *startswith_endswith_names[] = { + const char *const startswith_endswith_names[] = { "startswith", "endswith" }; @@ -1664,7 +1688,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = NPY_OBJECT; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace" }; @@ -1691,7 +1715,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[2] = NPY_OBJECT; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars" }; @@ -1750,7 +1774,7 @@ init_string_ufuncs(PyObject *umath) dtypes[1] = NPY_INT64; - const char *center_ljust_rjust_names[] = { + const char *const center_ljust_rjust_names[] = { "_center", "_ljust", "_rjust" }; @@ -1827,7 +1851,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[3] = dtypes[4] = dtypes[5] = NPY_OBJECT; dtypes[2] = NPY_INT64; - const char *partition_names[] = {"_partition_index", "_rpartition_index"}; + const char *const partition_names[] = {"_partition_index", "_rpartition_index"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 37ae0a39a349..ebc10586bf8b 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -137,9 +137,9 @@ static int multiply_loop_core( size_t newsize; int overflowed = npy_mul_with_overflow_size_t( &newsize, cursize, factor); - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in string multiply"); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in string multiply"); goto fail; } @@ -1736,7 +1736,7 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, size_t num_codepoints = inbuf.num_codepoints(); npy_intp width = (npy_intp)*(npy_int64*)in2; - if (num_codepoints > (size_t)width) { + if ((npy_intp)num_codepoints > width) { width = num_codepoints; } @@ -1748,9 +1748,9 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, width - num_codepoints); newsize += s1.size; - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in %s", ufunc_name); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + 
npy_gil_error(PyExc_OverflowError, + "Overflow encountered in %s", ufunc_name); goto fail; } @@ -1866,8 +1866,8 @@ zfill_strided_loop(PyArrayMethod_Context *context, { Buffer inbuf((char *)is.buf, is.size); size_t in_codepoints = inbuf.num_codepoints(); - size_t width = (size_t)*(npy_int64 *)in2; - if (in_codepoints > width) { + npy_intp width = (npy_intp)*(npy_int64*)in2; + if ((npy_intp)in_codepoints > width) { width = in_codepoints; } // number of leading one-byte characters plus the size of the @@ -2264,10 +2264,14 @@ slice_strided_loop(PyArrayMethod_Context *context, char *const data[], if (step == 1) { // step == 1 is the easy case, we can just use memcpy - npy_intp outsize = ((size_t)stop < num_codepoints - ? codepoint_offsets[stop] - : (unsigned char *)is.buf + is.size) - - codepoint_offsets[start]; + unsigned char *start_bounded = ((size_t)start < num_codepoints + ? codepoint_offsets[start] + : (unsigned char *)is.buf + is.size); + unsigned char *stop_bounded = ((size_t)stop < num_codepoints + ? codepoint_offsets[stop] + : (unsigned char *)is.buf + is.size); + npy_intp outsize = stop_bounded - start_bounded; + outsize = outsize < 0 ? 0 : outsize; if (load_new_string(ops, &os, outsize, oallocator, "slice") < 0) { goto fail; @@ -2276,7 +2280,7 @@ slice_strided_loop(PyArrayMethod_Context *context, char *const data[], /* explicitly discard const; initializing new buffer */ char *buf = (char *)os.buf; - memcpy(buf, codepoint_offsets[start], outsize); + memcpy(buf, start_bounded, outsize); } else { // handle step != 1 @@ -2605,7 +2609,7 @@ add_object_and_unicode_promoters(PyObject *umath, const char* ufunc_name, NPY_NO_EXPORT int init_stringdtype_ufuncs(PyObject *umath) { - static const char *comparison_ufunc_names[6] = { + static const char *const comparison_ufunc_names[6] = { "equal", "not_equal", "less", "less_equal", "greater_equal", "greater", }; @@ -2654,7 +2658,7 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - const char *unary_loop_names[] = { + const char *const unary_loop_names[] = { "isalpha", "isdecimal", "isdigit", "isnumeric", "isspace", "isalnum", "istitle", "isupper", "islower", }; @@ -2874,7 +2878,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace", }; @@ -2898,7 +2902,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars", }; @@ -3082,7 +3086,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType }; - const char *partition_names[] = {"_partition", "_rpartition"}; + const char *const partition_names[] = {"_partition", "_rpartition"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK diff --git a/numpy/_core/src/umath/svml b/numpy/_core/src/umath/svml index 32bf2a984207..3a713b130183 160000 --- a/numpy/_core/src/umath/svml +++ b/numpy/_core/src/umath/svml @@ -1 +1 @@ -Subproject commit 32bf2a98420762a63ab418aaa0a7d6e17eb9627a +Subproject commit 3a713b13018325451c1b939d3914ceff5ec68e19 diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 4cdde8d3d77d..63ac438eabc4 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -65,6 +65,8 @@ #include 
"mapping.h" #include "npy_static_data.h" #include "multiarraymodule.h" +#include "number.h" +#include "scalartypes.h" // for is_anyscalar_exact and scalar_value /********** PRINTF DEBUG TRACING **************/ #define NPY_UF_DBG_TRACING 0 @@ -1368,7 +1370,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, * Get axes tuple for operand. If not a tuple already, make it one if * there is only one axis (its content is checked later). */ - op_axes_tuple = PyList_GET_ITEM(axes, iop); + op_axes_tuple = PyList_GET_ITEM(axes, iop); // noqa: borrowed-ref - manual fix needed if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. */ @@ -2084,11 +2086,10 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, NPY_SIZEOF_INTP * nop); /* Final preparation of the arraymethod call */ - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; PyArrayMethod_StridedLoop *strided_loop; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -2203,11 +2204,10 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, } /* Final preparation of the arraymethod call */ - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; /* Do the ufunc loop */ if (wheremask != NULL) { @@ -2553,11 +2553,10 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, return NULL; } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; PyArrayObject *result = PyUFunc_ReduceWrapper(&context, arr, out, wheremask, axis_flags, keepdims, @@ -2629,12 +2628,10 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, assert(PyArray_EquivTypes(descrs[0], descrs[1]) && PyArray_EquivTypes(descrs[0], descrs[2])); - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; - + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc, + context.method = ufuncimpl, ndim = PyArray_NDIM(arr); #if NPY_UF_DBG_TRACING @@ -3061,12 +3058,10 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, goto fail; } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; - + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc, + context.method = ufuncimpl, ndim = PyArray_NDIM(arr); #if NPY_UF_DBG_TRACING @@ -3728,6 +3723,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } + Py_XDECREF(out); + Py_DECREF(signature[0]); Py_DECREF(signature[1]); Py_DECREF(signature[2]); @@ -3753,6 +3750,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, return wrapped_result; fail: + Py_XDECREF(out); + Py_XDECREF(signature[0]); Py_XDECREF(signature[1]); Py_XDECREF(signature[2]); @@ -4272,6 +4271,148 @@ replace_with_wrapped_result_and_return(PyUFuncObject *ufunc, return NULL; } +/* + * 
+ * Check whether the input object is a known scalar and whether the ufunc has
+ * a suitable inner loop for it, which takes and returns the data type of the
+ * input (this function is not called if an output or any other argument was
+ * given).
+ * If a loop was found, call it and store the result.
+ *
+ * Returns -2 if a short-cut is not possible, 0 on success and -1 on error.
+ */
+static int
+try_trivial_scalar_call(
+        PyUFuncObject *ufunc, PyObject *const obj, PyObject **result)
+{
+    assert(ufunc->nin == 1 && ufunc->nout == 1 && !ufunc->core_enabled);
+    npy_clongdouble cin, cout;  // aligned storage, using longest type.
+    char *in = (char *)&cin, *out = (char *)&cout;
+    char *data[] = {in, out};
+    int ret = -2;
+    PyArray_Descr *dt;
+    /*
+     * For supported input, get input pointer and descriptor. Otherwise, bail.
+     */
+    if (obj == Py_False || obj == Py_True) {
+        *(npy_bool *)in = (obj == Py_True);
+        dt = PyArray_DescrFromType(NPY_BOOL);
+    }
+    else if (PyFloat_CheckExact(obj)) {
+        *(double *)in = PyFloat_AS_DOUBLE(obj);
+        dt = PyArray_DescrFromType(NPY_FLOAT64);
+    }
+    else if (PyLong_CheckExact(obj)) {
+        int overflow;
+        npy_intp val = PyLong_AsLongAndOverflow(obj, &overflow);
+        if (overflow) {
+            return -2;  // bail, main code perhaps deals with this.
+        }
+        if (error_converting(val)) {
+            return -1;  // should never happen; pass on it if it does.
+        }
+        *(npy_intp *)in = val;
+        dt = PyArray_DescrFromType(NPY_INTP);
+    }
+    else if (PyComplex_CheckExact(obj)) {
+        Py_complex oop = PyComplex_AsCComplex(obj);
+        if (error_converting(oop.real)) {
+            return -1;  // should never happen; pass on it if it does.
+        }
+        *(double *)in = oop.real;
+        *(double *)(in+sizeof(double)) = oop.imag;
+        dt = PyArray_DescrFromType(NPY_COMPLEX128);
+    }
+    else if (is_anyscalar_exact(obj)) {
+        dt = PyArray_DescrFromScalar(obj);
+        if (!PyDataType_ISNUMBER(dt)) {
+            goto bail;
+        }
+        data[0] = scalar_value(obj, dt);
+    }
+    else {
+        return -2;
+    }
+    /*
+     * Check the ufunc supports our descriptor, bailing (return -2) if not.
+     */
+    // Try getting info from the (private) cache. Fall back if not found,
+    // so that the dtype gets registered and things will work next time.
+    PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL};
+#ifdef Py_GIL_DISABLED
+    // Other threads may be in the process of filling the dispatch cache,
+    // so we need to acquire the free-threading-specific dispatch cache mutex
+    // before reading the cache.
+    PyObject *info = PyArrayIdentityHash_GetItemWithLock(  // borrowed reference.
+            (PyArrayIdentityHash *)ufunc->_dispatch_cache,
+            (PyObject **)op_dtypes);
+#else
+    PyObject *info = PyArrayIdentityHash_GetItem(  // borrowed reference.
+            (PyArrayIdentityHash *)ufunc->_dispatch_cache,
+            (PyObject **)op_dtypes);
+#endif
+    if (info == NULL) {
+        goto bail;
+    }
+    // Check the actual dtype is correct (can be wrong with promotion).
+    PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0);
+    if ((PyTuple_GET_ITEM(all_dtypes, 0) != (PyObject *)NPY_DTYPE(dt)) ||
+            (PyTuple_GET_ITEM(all_dtypes, 1) != (PyObject *)NPY_DTYPE(dt))) {
+        goto bail;
+    }
+    // Get the method, bailing if it is not an arraymethod (e.g., a promoter).
+    PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1);
+    if (!PyObject_TypeCheck(method, &PyArrayMethod_Type)) {
+        goto bail;
+    }
+    // Get the loop, requiring that the output and input dtype are the same.
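+    /*
+     * Build the same kind of context the full array-call path uses, but with
+     * identical input/output descriptors and zero strides, so the strided
+     * loop below processes exactly one element.
+     */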
+ PyArrayMethod_Context context; + PyArray_Descr *descrs[2] = {dt, dt}; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = method; + npy_intp strides[2] = {0, 0}; // 0 ensures scalar math, not SIMD for half. + PyArrayMethod_StridedLoop *strided_loop; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS flags = 0; + if (method->get_strided_loop(&context, 1, 0, strides, + &strided_loop, &auxdata, &flags) < 0) { + ret = -1; // Should not happen, so raise error if it does anyway. + goto bail; + } + /* + * Call loop with single element, checking floating point errors. + */ + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + npy_clear_floatstatus(); + } + npy_intp n = 1; + ret = strided_loop(&context, data, &n, strides, auxdata); + NPY_AUXDATA_FREE(auxdata); + if (ret == 0) { + if (PyErr_Occurred()) { + ret = -1; + goto bail; + } + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + // Check for any unmasked floating point errors (note: faster + // than _check_ufunc_fperr as one doesn't need mask up front). + int fpe_errors = npy_get_floatstatus(); + if (fpe_errors) { + if (PyUFunc_GiveFloatingpointErrors( + ufunc_get_name_cstr(ufunc), fpe_errors) < 0) { + ret = -1; // Real error, falling back would not help. + goto bail; + } + } + } + *result = PyArray_Scalar(out, dt, NULL); + if (*result == NULL) { + ret = -1; // Real error (should never happen). + } + } + bail: + Py_DECREF(dt); + return ret; +} /* * Main ufunc call implementation. @@ -4290,6 +4431,14 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, int errval; int nin = ufunc->nin, nout = ufunc->nout, nop = ufunc->nargs; + if (len_args == 1 && kwnames == NULL && !PyArray_Check(args[0]) + && nin == 1 && nout == 1 && !ufunc->core_enabled) { + // Possibly scalar input, try the fast path, falling back on failure. + PyObject *result = NULL; + if (try_trivial_scalar_call(ufunc, args[0], &result) != -2) { + return result; + } + } /* All following variables are cleared in the `fail` error path */ ufunc_full_args full_args = {NULL, NULL}; PyArrayObject *wheremask = NULL; @@ -4303,7 +4452,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, return NULL; } memset(scratch_objs, 0, sizeof(void *) * (nop * 4 + 2)); - + PyArray_DTypeMeta **signature = (PyArray_DTypeMeta **)scratch_objs; PyArrayObject **operands = (PyArrayObject **)(signature + nop); PyArray_DTypeMeta **operand_DTypes = (PyArray_DTypeMeta **)(operands + nop + 1); @@ -4319,10 +4468,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Check number of arguments */ if (NPY_UNLIKELY((len_args < nin) || (len_args > nop))) { + const char *verb = (len_args == 1) ? "was" : "were"; PyErr_Format(PyExc_TypeError, - "%s() takes from %d to %d positional arguments but " - "%zd were given", - ufunc_get_name_cstr(ufunc) , nin, nop, len_args); + "%s() takes from %d to %d positional arguments but " + "%zd %s given", + ufunc_get_name_cstr(ufunc), nin, nop, len_args, verb); goto fail; } @@ -4363,6 +4513,21 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, Py_INCREF(tmp); PyTuple_SET_ITEM(full_args.out, i-nin, tmp); } + + /* Extra positional args but no keywords */ + /* DEPRECATED NumPy 2.4, 2025-08 */ + if ((PyObject *)ufunc == n_ops.maximum || (PyObject *)ufunc == n_ops.minimum) { + + if (DEPRECATE( + "Passing more than 2 positional arguments to np.maximum and np.minimum " + "is deprecated. If you meant to use the third argument as an output, " + "use the `out` keyword argument instead. 
If you hoped to work with "
+                    "more than 2 inputs, combine them into a single array and get the extrema "
+                    "for the relevant axis.") < 0) {
+                return NULL;
+            }
+        }
+
        if (all_none) {
            Py_SETREF(full_args.out, NULL);
        }
@@ -4472,6 +4637,15 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
        return override;
    }

+    /* Warn if "where" is used without "out", issue 29561 */
+    if ((where_obj != NULL) && (full_args.out == NULL) && (out_obj == NULL)) {
+        if (PyErr_WarnEx(PyExc_UserWarning,
+                "'where' used without 'out', expect uninitialized memory in output. "
+                "If this is intentional, use out=None.", 1) < 0) {
+            goto fail;
+        }
+    }
+
    if (outer) {
        /* Outer uses special preparation of inputs (expand dims) */
        PyObject *new_in = prepare_input_arguments_for_outer(full_args.in, ufunc);
@@ -4939,7 +5113,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
                                function, arg_typenums, data);

    if (result == 0) {
-        cobj = PyDict_GetItemWithError(ufunc->userloops, key);
+        cobj = PyDict_GetItemWithError(ufunc->userloops, key);  // noqa: borrowed-ref OK
        if (cobj == NULL && PyErr_Occurred()) {
            result = -1;
        }
@@ -5070,7 +5244,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
     */
    int add_new_loop = 1;
    for (Py_ssize_t j = 0; j < PyList_GET_SIZE(ufunc->_loops); j++) {
-        PyObject *item = PyList_GET_ITEM(ufunc->_loops, j);
+        PyObject *item = PyList_GET_ITEM(ufunc->_loops, j);  // noqa: borrowed-ref OK
        PyObject *existing_tuple = PyTuple_GET_ITEM(item, 0);
        int cmp = PyObject_RichCompareBool(existing_tuple, signature_tuple, Py_EQ);
@@ -5112,7 +5286,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
    funcdata->nargs = 0;

    /* Get entry for this user-defined type*/
-    cobj = PyDict_GetItemWithError(ufunc->userloops, key);
+    cobj = PyDict_GetItemWithError(ufunc->userloops, key);  // noqa: borrowed-ref OK
    if (cobj == NULL && PyErr_Occurred()) {
        goto fail;
    }
@@ -5897,11 +6071,10 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
        }
    }

-    PyArrayMethod_Context context = {
-        .caller = (PyObject *)ufunc,
-        .method = ufuncimpl,
-        .descriptors = operation_descrs,
-    };
+    PyArrayMethod_Context context;
+    NPY_context_init(&context, operation_descrs);
+    context.caller = (PyObject *)ufunc;
+    context.method = ufuncimpl;

    /* Use contiguous strides; if there is such a loop it may be faster */
    npy_intp strides[3] = {
@@ -6411,8 +6584,8 @@ static struct PyMethodDef ufunc_methods[] = {
};


-/******************************************************************************
- ***                           UFUNC GETSET                                 ***
+/*****************************************************************************
+ ***                          UFUNC GETSET                                  ***
 *****************************************************************************/


@@ -6561,6 +6734,9 @@ static PyGetSetDef ufunc_getset[] = {
    {"__doc__",
        (getter)ufunc_get_doc,
        (setter)ufunc_set_doc,
        NULL, NULL},
+    {"__name__",
+        (getter)ufunc_get_name,
+        NULL, NULL, NULL},
    {"nin",
        (getter)ufunc_get_nin,
        NULL, NULL, NULL},
@@ -6576,15 +6752,13 @@ static PyGetSetDef ufunc_getset[] = {
    {"types",
        (getter)ufunc_get_types,
        NULL, NULL, NULL},
-    {"__name__",
-        (getter)ufunc_get_name,
-        NULL, NULL, NULL},
    {"identity",
        (getter)ufunc_get_identity,
        NULL, NULL, NULL},
    {"signature",
        (getter)ufunc_get_signature,
        NULL, NULL, NULL},
+    // __signature__ stored in `__dict__`, see `_globals._SignatureDescriptor`
    {NULL, NULL, NULL, NULL, NULL},  /* Sentinel */
};

diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c
index 95670efb936f..f5a203719b54 100644
--- a/numpy/_core/src/umath/ufunc_type_resolution.c
+++
b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1252,9 +1252,10 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc, type_num2 = PyArray_DESCR(operands[1])->type_num; /* Use the default when datetime and timedelta are not involved */ - if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) { - return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, - type_tup, out_dtypes); + if ((!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) || + (PyTypeNum_ISOBJECT(type_num1) || PyTypeNum_ISOBJECT(type_num2))) { + return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, type_tup, + out_dtypes); } if (type_num1 == NPY_TIMEDELTA) { @@ -1455,7 +1456,7 @@ find_userloop(PyUFuncObject *ufunc, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(ufunc->userloops, key); + obj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1742,7 +1743,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1813,7 +1814,7 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index e5cf2cf8acb3..3efb02bd4a49 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -267,11 +267,11 @@ int initumath(PyObject *m) PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO)); PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN)); - s = PyDict_GetItemString(d, "divide"); + s = PyDict_GetItemString(d, "divide"); // noqa: borrowed-ref OK PyDict_SetItemString(d, "true_divide", s); - s = PyDict_GetItemString(d, "conjugate"); - s2 = PyDict_GetItemString(d, "remainder"); + s = PyDict_GetItemString(d, "conjugate"); // noqa: borrowed-ref OK + s2 = PyDict_GetItemString(d, "remainder"); // noqa: borrowed-ref OK /* Setup the array object's numerical structures with appropriate ufuncs in d*/ diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 9b3970561f3f..924bac9524e9 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -83,8 +83,8 @@ typedef struct { #define WRAPPING_AUXDATA_FREELIST_SIZE 5 -static int wrapping_auxdata_freenum = 0; -static wrapping_auxdata *wrapping_auxdata_freelist[WRAPPING_AUXDATA_FREELIST_SIZE] = {NULL}; +static NPY_TLS int wrapping_auxdata_freenum = 0; +static NPY_TLS wrapping_auxdata *wrapping_auxdata_freelist[WRAPPING_AUXDATA_FREELIST_SIZE] = {NULL}; static void @@ -114,7 +114,7 @@ get_wrapping_auxdata(void) } else { res = PyMem_Calloc(1, sizeof(wrapping_auxdata)); - if (res < 0) { + if (res == NULL) { PyErr_NoMemory(); return NULL; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index fc0a2d0b4d1a..e9fa7f58e3ea 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -14,10 +14,8 @@ greater_equal, less, less_equal, - not_equal, -) -from numpy import ( multiply as 
_multiply_ufunc,
+    not_equal,
 )
 from numpy._core.multiarray import _vec_string
 from numpy._core.overrides import array_function_dispatch, set_module
@@ -40,6 +38,10 @@
     _strip_chars,
     _strip_whitespace,
     _zfill,
+    count as _count_ufunc,
+    endswith as _endswith_ufunc,
+    find as _find_ufunc,
+    index as _index_ufunc,
     isalnum,
     isalpha,
     isdecimal,
@@ -49,28 +51,10 @@
     isspace,
     istitle,
     isupper,
-    str_len,
-)
-from numpy._core.umath import (
-    count as _count_ufunc,
-)
-from numpy._core.umath import (
-    endswith as _endswith_ufunc,
-)
-from numpy._core.umath import (
-    find as _find_ufunc,
-)
-from numpy._core.umath import (
-    index as _index_ufunc,
-)
-from numpy._core.umath import (
     rfind as _rfind_ufunc,
-)
-from numpy._core.umath import (
     rindex as _rindex_ufunc,
-)
-from numpy._core.umath import (
     startswith as _startswith_ufunc,
+    str_len,
 )
@@ -218,7 +202,7 @@ def multiply(a, i):

     # Ensure we can do a_len * i without overflow.
     if np.any(a_len > sys.maxsize / np.maximum(i, 1)):
-        raise MemoryError("repeated string is too long")
+        raise OverflowError("Overflow encountered in string multiply")

     buffersizes = a_len * i
     out_dtype = f"{a.dtype.char}{buffersizes.max()}"
@@ -1743,7 +1727,7 @@ def translate(a, table, deletechars=None):
     )


 @set_module("numpy.strings")
-def slice(a, start=None, stop=None, step=None, /):
+def slice(a, start=None, stop=np._NoValue, step=None, /):
     """
     Slice the strings in `a` by slices specified by `start`, `stop`, `step`.
     Like in the regular Python `slice` object, if only `start` is
@@ -1776,6 +1760,9 @@ def slice(a, start=None, stop=None, step=None, /):
     >>> np.strings.slice(a, 2)
     array(['he', 'wo'], dtype='<U2')

+    >>> np.strings.slice(a, 2, None)
+    array(['llo', 'rld'], dtype='<U3')
+
     >>> np.strings.slice(a, 1, 5, 2)
     array(['el', 'ol'], dtype='<U2')

     >>> b = np.array(['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'],
     ...              dtype=np.dtypes.StringDType())
     >>> np.strings.slice(b, -2)
     array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType())

+    >>> np.strings.slice(b, -2, None)
+    array(['ld', 'με', '世界', ' 🌍'], dtype=StringDType())
+
     >>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3])
     array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType())

@@ -1801,7 +1791,7 @@ def slice(a, start=None, stop=None, step=None, /):
     """
     # Just like in the construction of a regular slice object, if only start
     # is specified then start will become stop, see logic in slice_new.
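+    # np._NoValue (rather than None) distinguishes "stop not given" from an
+    # explicit stop=None, so slice(a, 2, None) can mean "from 2 to the end".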
- if stop is None: + if stop is np._NoValue: stop = start start = None diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 52d244b36ccd..475da159f783 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,12 +1,17 @@ -from typing import TypeAlias, overload +from typing import overload import numpy as np -from numpy._typing import NDArray, _AnyShape, _SupportsArray -from numpy._typing import _ArrayLikeAnyString_co as UST_co -from numpy._typing import _ArrayLikeBytes_co as S_co -from numpy._typing import _ArrayLikeInt_co as i_co -from numpy._typing import _ArrayLikeStr_co as U_co -from numpy._typing import _ArrayLikeString_co as T_co +from numpy._globals import _NoValueType +from numpy._typing import ( + NDArray, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _SupportsArray, +) __all__ = [ "add", @@ -54,11 +59,12 @@ __all__ = [ "translate", "upper", "zfill", + "slice", ] -_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] -_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray +type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] +type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] +type _StringDTypeOrUnicodeArray = NDArray[np.str_] | _StringDTypeArray @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @@ -145,154 +151,154 @@ def str_len(x: UST_co) -> NDArray[np.int_]: ... def find( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def find( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def find( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... 
@overload def rindex( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def startswith( a: U_co, prefix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: S_co, prefix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: T_co, prefix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: U_co, suffix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: S_co, suffix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: T_co, suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def decode( @@ -307,13 +313,13 @@ def encode( ) -> NDArray[np.bytes_]: ... @overload -def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ... +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[np.str_]: ... @overload -def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ... +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[np.bytes_]: ... @overload -def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... @overload -def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... @overload def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @@ -428,28 +434,28 @@ def replace( a: U_co, old: U_co, new: U_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.str_]: ... @overload def replace( a: S_co, old: S_co, new: S_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.bytes_]: ... @overload def replace( a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeArray: ... @overload def replace( a: T_co, old: T_co, new: T_co, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeOrUnicodeArray: ... @overload @@ -494,3 +500,37 @@ def translate( table: str, deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice( + a: U_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.str_]: ... 
+@overload +def slice( + a: S_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.bytes_]: ... +@overload +def slice( + a: _StringDTypeSupportsArray, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> _StringDTypeArray: ... +@overload +def slice( + a: T_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> _StringDTypeOrUnicodeArray: ... diff --git a/numpy/_core/tests/_natype.py b/numpy/_core/tests/_natype.py index 1c2175b35933..539dfd2b36e1 100644 --- a/numpy/_core/tests/_natype.py +++ b/numpy/_core/tests/_natype.py @@ -74,8 +74,7 @@ def __bool__(self): raise TypeError("boolean value of NA is ambiguous") def __hash__(self): - exponent = 31 if is_32bit else 61 - return 2**exponent - 1 + return 2**61 - 1 def __reduce__(self): return "pd_NA" @@ -114,33 +113,6 @@ def __reduce__(self): __abs__ = _create_unary_propagating_op("__abs__") __invert__ = _create_unary_propagating_op("__invert__") - # pow has special - def __pow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 0: - # returning positive is correct for +/- 0. - return type(other)(1) - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 0, other.dtype.type(1), pd_NA) - - return NotImplemented - - def __rpow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 1: - return other - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 1, other, pd_NA) - return NotImplemented - # Logical ops using Kleene logic def __and__(self, other): @@ -168,38 +140,5 @@ def __xor__(self, other): __rxor__ = __xor__ - __array_priority__ = 1000 - _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - types = self._HANDLED_TYPES + (NAType,) - for x in inputs: - if not isinstance(x, types): - return NotImplemented - - if method != "__call__": - raise ValueError(f"ufunc method '{method}' not supported for NA") - result = maybe_dispatch_ufunc_to_dunder_op( - self, ufunc, method, *inputs, **kwargs - ) - if result is NotImplemented: - # For a NumPy ufunc that's not a binop, like np.logaddexp - index = next(i for i, x in enumerate(inputs) if x is pd_NA) - result = np.broadcast_arrays(*inputs)[index] - if result.ndim == 0: - result = result.item() - if ufunc.nout > 1: - result = (pd_NA,) * ufunc.nout - - return result - pd_NA = NAType() - - -def get_stringdtype_dtype(na_object, coerce=True): - # explicit is check for pd_NA because != with pd_NA returns pd_NA - if na_object is pd_NA or na_object != "unset": - return np.dtypes.StringDType(na_object=na_object, coerce=coerce) - else: - return np.dtypes.StringDType(coerce=coerce) diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index 57df05c1e3b5..f0f427d2167f 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -244,9 +244,9 @@ def npyiter_has_multi_index(it: "nditer"): def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr): cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) - cdef cnp.NpyIter_GetMultiIndexFunc get_multi_index = \ + cdef cnp.NpyIter_GetMultiIndexFunc _get_multi_index = 
\ cnp.NpyIter_GetGetMultiIndex(cit, NULL) - cdef cnp.NpyIter_IterNextFunc iternext = \ + cdef cnp.NpyIter_IterNextFunc _iternext = \ cnp.NpyIter_GetIterNext(cit, NULL) return 1 diff --git a/numpy/_core/tests/examples/cython/setup.py b/numpy/_core/tests/examples/cython/setup.py index eb57477fc2a1..ba0639ebcf1c 100644 --- a/numpy/_core/tests/examples/cython/setup.py +++ b/numpy/_core/tests/examples/cython/setup.py @@ -4,10 +4,10 @@ """ import os -from distutils.core import setup import Cython from Cython.Build import cythonize +from distutils.core import setup from setuptools.extension import Extension import numpy as np diff --git a/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/numpy/_core/tests/examples/limited_api/limited_api_latest.c index 13668f2f0ebf..92d83ea977a1 100644 --- a/numpy/_core/tests/examples/limited_api/limited_api_latest.c +++ b/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -1,11 +1,11 @@ -#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 - # error "Py_LIMITED_API not defined to Python major+minor version" -#endif - #include #include #include +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + static PyModuleDef moduledef = { .m_base = PyModuleDef_HEAD_INIT, .m_name = "limited_api_latest" diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 25990536809b..216a2c75afb8 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -1,17 +1,17 @@ import sys import pytest -from numpy._core._rational_tests import rational import numpy as np import numpy._core.umath as ncu +from numpy._core._rational_tests import rational +from numpy.lib import stride_tricks from numpy.testing import ( HAS_REFCOUNT, assert_, assert_array_equal, assert_equal, assert_raises, - assert_warns, ) @@ -88,10 +88,10 @@ def test_array_array(): o = type("o", (object,), {"__array_struct__": a.__array_struct__}) # wasn't what I expected... is np.array(o) supposed to equal a ? 
- # instead we get a array([...], dtype=">V18") + # instead we get an array([...], dtype=">V18") assert_equal(bytes(np.array(o).data), bytes(a.data)) - # test array + # test __array__ def custom__array__(self, dtype=None, copy=None): return np.array(100.0, dtype=dtype, copy=copy) @@ -157,6 +157,39 @@ def custom__array__(self, dtype=None, copy=None): assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), np.ones((10, 10), dtype=np.float64)) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test___array___refcount(): + class MyArray: + def __init__(self, dtype): + self.val = np.array(-1, dtype=dtype) + + def __array__(self, dtype=None, copy=None): + return self.val.__array__(dtype=dtype, copy=copy) + + # test all possible scenarios: + # dtype(none | same | different) x copy(true | false | none) + dt = np.dtype(np.int32) + old_refcount = sys.getrefcount(dt) + np.array(MyArray(dt)) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt, copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + dt2 = np.dtype(np.int16) + old_refcount2 = sys.getrefcount(dt2) + np.array(MyArray(dt), dtype=dt2) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + np.array(MyArray(dt), dtype=dt2, copy=None) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + with pytest.raises(ValueError): + np.array(MyArray(dt), dtype=dt2, copy=False) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + + @pytest.mark.parametrize("array", [True, False]) def test_array_impossible_casts(array): # All builtin types can be forcibly cast, at least theoretically, @@ -314,7 +347,7 @@ def test_object_array_astype_to_void(): def test_array_astype_warning(t): # test ComplexWarning when casting from complex to float or int a = np.array(10, dtype=np.complex128) - assert_warns(np.exceptions.ComplexWarning, a.astype, t) + pytest.warns(np.exceptions.ComplexWarning, a.astype, t) @pytest.mark.parametrize(["dtype", "out_dtype"], [(np.bytes_, np.bool), @@ -558,7 +591,7 @@ def check_copy_result(x, y, ccontig, fcontig, strides=False): def test_contiguous_flags(): a = np.ones((4, 4, 1))[::2, :, :] - a.strides = a.strides[:2] + (-123,) + a = stride_tricks.as_strided(a, strides=a.strides[:2] + (-123,)) b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) def check_contig(a, ccontig, fcontig): @@ -590,11 +623,12 @@ def check_contig(a, ccontig, fcontig): def test_broadcast_arrays(): # Test user defined dtypes - a = np.array([(1, 2, 3)], dtype='u4,u4,u4') - b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + dtype = 'u4,u4,u4' + a = np.array([(1, 2, 3)], dtype=dtype) + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype) result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype=dtype)) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype)) @pytest.mark.parametrize(["shape", "fill_value", "expected_output"], [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])), diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index 7f949c1059eb..ededced3b9fe 100644 --- a/numpy/_core/tests/test_argparse.py +++ 
b/numpy/_core/tests/test_argparse.py
@@ -14,14 +14,12 @@ def func(arg1, /, arg2, *, arg3):
 import threading

 import pytest
+
+import numpy as np
 from numpy._core._multiarray_tests import (
     argparse_example_function as func,
-)
-from numpy._core._multiarray_tests import (
     threaded_argparse_example_function as thread_func,
 )
-
-import numpy as np
 from numpy.testing import IS_WASM
diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py
index 883aee63ac3a..9c8c4a09cfc9 100644
--- a/numpy/_core/tests/test_array_coercion.py
+++ b/numpy/_core/tests/test_array_coercion.py
@@ -6,13 +6,13 @@
 from itertools import permutations, product

-import numpy._core._multiarray_umath as ncu
 import pytest
-from numpy._core._rational_tests import rational
 from pytest import param

 import numpy as np
-from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal
+import numpy._core._multiarray_umath as ncu
+from numpy._core._rational_tests import rational
+from numpy.testing import IS_64BIT, assert_array_equal


 def arraylikes():
@@ -266,11 +266,6 @@ def test_scalar_coercion(self, scalar):
         # Ensure we have a full-precision number if available
         scalar = type(scalar)((scalar * 2)**0.5)

-        if type(scalar) is rational:
-            # Rational generally fails due to a missing cast. In the future
-            # object casts should automatically be defined based on `setitem`.
-            pytest.xfail("Rational to object cast is undefined currently.")
-
         # Use casting from object:
         arr = np.array(scalar, dtype=object).astype(scalar.dtype)
@@ -287,7 +282,6 @@
         assert_array_equal(arr, arr3)
         assert_array_equal(arr, arr4)

-    @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
     @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
     @pytest.mark.parametrize("cast_to", scalar_instances())
     def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
@@ -717,6 +711,7 @@ def __array__(self, dtype=None, copy=None):
         assert arr[0] is ArrayLike

     @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+    @pytest.mark.thread_unsafe(reason="large slow test in parallel")
     def test_too_large_array_error_paths(self):
         """Test the error paths, including for memory leaks"""
         arr = np.array(0, dtype="uint8")
@@ -909,3 +904,24 @@ def test_empty_string():
     assert_array_equal(res, b"")
     assert res.shape == (2, 10)
     assert res.dtype == "S1"
+
+
+@pytest.mark.parametrize("dtype", ["S", "U", object])
+@pytest.mark.parametrize("res_dt,hug_val",
+        [("float16", "1e30"), ("float32", "1e200")])
+def test_string_to_float_coercion_errors(dtype, res_dt, hug_val):
+    # This test primarily tests setitem
+    val = np.array(["3M"], dtype=dtype)[0]  # use the scalar
+
+    with pytest.raises(ValueError):
+        np.array(val, dtype=res_dt)
+
+    val = np.array([hug_val], dtype=dtype)[0]  # use the scalar
+
+    with np.errstate(all="warn"):
+        with pytest.warns(RuntimeWarning):
+            np.array(val, dtype=res_dt)
+
+    with np.errstate(all="raise"):
+        with pytest.raises(FloatingPointError):
+            np.array(val, dtype=res_dt)
diff --git a/numpy/_core/tests/test_arraymethod.py b/numpy/_core/tests/test_arraymethod.py
index d8baef7e7fbf..5b3d51585718 100644
--- a/numpy/_core/tests/test_arraymethod.py
+++ b/numpy/_core/tests/test_arraymethod.py
@@ -7,9 +7,9 @@
 from typing import Any

 import pytest
-from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl

 import numpy as np
+from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl


 class TestResolveDescriptors:
diff --git
a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 1fd4ac2fddb7..e6cbb6f72229 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -15,7 +15,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) from numpy.testing._private.utils import run_threaded @@ -278,11 +277,12 @@ def test_structure_format_mixed(self): # for issue #5692 A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) A[5:].fill(np.datetime64('NaT')) + date_string = '1970-01-01T00:00:00' assert_equal( np.array2string(A), - textwrap.dedent("""\ - [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) - ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + textwrap.dedent(f"""\ + [('{date_string}',) ('{date_string}',) ('{date_string}',) + ('{date_string}',) ('{date_string}',) ('NaT',) ('NaT',) ('NaT',) ('NaT',) ('NaT',)]""") ) finally: @@ -547,6 +547,7 @@ def test_any_text(self, text): assert_equal(result, expected_repr) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.thread_unsafe(reason="garbage collector is global state") def test_refcount(self): # make sure we do not hold references to the array due to a recursive # closure (gh-10620) @@ -735,12 +736,7 @@ def test_0d_arrays(self): # str is unaffected assert_equal(str(x), "1") - # check `style` arg raises - assert_warns(DeprecationWarning, np.array2string, - np.array(1.), style=repr) - # but not in legacy mode - np.array2string(np.array(1.), style=repr, legacy='1.13') - # gh-10934 style was broken in legacy mode, check it works + # check it works np.array2string(np.array(1.), legacy='1.13') def test_float_spacing(self): @@ -1272,8 +1268,6 @@ def test_scalar_void_float_str(): assert str(scalar) == "(1.0, 2.0)" @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") -@pytest.mark.skipif(sys.version_info < (3, 11), - reason="asyncio.barrier was added in Python 3.11") def test_printoptions_asyncio_safe(): asyncio = pytest.importorskip("asyncio") @@ -1321,6 +1315,7 @@ async def main(): loop.close() @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads") +@pytest.mark.thread_unsafe(reason="test is already explicitly multi-threaded") def test_multithreaded_array_printing(): # the dragon4 implementation uses a static scratch space for performance # reasons this test makes sure it is set up in a thread-safe manner diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index f8441ea9d0d7..5f643f8045ba 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -10,13 +10,14 @@ import enum import random import textwrap +import warnings import pytest -from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl from numpy.lib.stride_tricks import as_strided -from numpy.testing import assert_array_equal +from numpy.testing import assert_array_equal, assert_equal # Simple skips object, parametric and long double (unsupported by struct) simple_dtypes = "?bhilqBHILQefdFD" @@ -76,8 +77,11 @@ class Casting(enum.IntEnum): safe = 2 same_kind = 3 unsafe = 4 + same_value = 64 +same_value_dtypes = tuple(type(np.dtype(c)) for c in "?bhilqBHILQefdgFDG") + def _get_cancast_table(): table = textwrap.dedent(""" X ? 
b h i l q B H I L Q e f d g F D G S U V O M m
@@ -117,6 +121,9 @@
         cancast[from_dt] = {}
         for to_dt, c in zip(dtypes, row[2::2]):
             cancast[from_dt][to_dt] = convert_cast[c]
+            # Of the types checked, the numeric casts support same-value
+            if from_dt in same_value_dtypes and to_dt in same_value_dtypes:
+                cancast[from_dt][to_dt] |= Casting.same_value

     return cancast

@@ -272,9 +279,11 @@ def test_simple_cancast(self, from_Dt):
                 if view_off is not None:
                     # If a view is acceptable, this is "no" casting
                     # and byte order must be matching.
-                    assert casting == Casting.no
-                    # The above table lists this as "equivalent"
-                    assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
+                    assert casting == Casting.no | Casting.same_value
+                    # The above table lists this as "equivalent", perhaps
+                    # with "same_value"
+                    v = CAST_TABLE[from_Dt][to_Dt] & ~Casting.same_value
+                    assert Casting.equiv == v
                     # Note that to_res may not be the same as from_dt
                     assert from_res.isnative == to_res.isnative
                 else:
@@ -304,6 +313,7 @@ def test_simple_direct_casts(self, from_dt):
             to_dt = to_dt.values[0]

             cast = get_castingimpl(type(from_dt), type(to_dt))
+            # print("from_dt", from_dt, "to_dt", to_dt)

             casting, (from_res, to_res), view_off = cast._resolve_descriptors(
                 (from_dt, to_dt))
@@ -317,7 +327,9 @@

             arr1, arr2, values = self.get_data(from_dt, to_dt)

+            # print("2", arr1, arr2, cast)
             cast._simple_strided_call((arr1, arr2))
+            # print("3")

             # Check via python list
             assert arr2.tolist() == values
@@ -815,3 +827,129 @@ def test_nonstandard_bool_to_other(self, dtype):
         res = nonstandard_bools.astype(dtype)
         expected = [0, 1, 1]
         assert_array_equal(res, expected)
+
+    @pytest.mark.parametrize("to_dtype",
+            np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
+    @pytest.mark.parametrize("from_dtype",
+            np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
+    @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
+    def test_same_value_overflow(self, from_dtype, to_dtype):
+        if from_dtype == to_dtype:
+            return
+        top1 = 0
+        top2 = 0
+        try:
+            top1 = np.iinfo(from_dtype).max
+        except ValueError:
+            top1 = np.finfo(from_dtype).max
+        try:
+            top2 = np.iinfo(to_dtype).max
+        except ValueError:
+            top2 = np.finfo(to_dtype).max
+        # No need to test if top2 > top1, since the test will also do the
+        # reverse dtype matching. Catch the warning if the comparison warns,
+        # i.e.
np.int16(65535) < np.float16(6.55e4) + with warnings.catch_warnings(record=True): + warnings.simplefilter("always", RuntimeWarning) + if top2 >= top1: + # will be tested when the dtypes are reversed + return + # Happy path + arr1 = np.array([0] * 10, dtype=from_dtype) + arr2 = np.array([0] * 10, dtype=to_dtype) + arr1_astype = arr1.astype(to_dtype, casting='same_value') + assert_equal(arr1_astype, arr2, strict=True) + # Make it overflow, both aligned and unaligned + arr1[0] = top1 + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + with pytest.raises(ValueError): + # Casting float to float with overflow should raise + # RuntimeWarning (fperror) + # Casting float to int with overflow sometimes raises + # RuntimeWarning (fperror) + # Casting with overflow and 'same_value', should raise ValueError + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + arr1.astype(to_dtype, casting='same_value') + assert len(w) < 2 + with pytest.raises(ValueError): + # again, unaligned + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + unaligned.astype(to_dtype, casting='same_value') + assert len(w) < 2 + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + arr1 = np.arange(10, dtype=from_dtype) + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + arr2 = np.arange(10, dtype=to_dtype) + assert_array_equal(arr1.astype(to_dtype, casting='same_value'), arr2) + assert_array_equal(unaligned.astype(to_dtype, casting='same_value'), arr2) + + # Should raise, since values cannot round trip. 
Might warn too about + # FPE errors + arr1_66 = arr1 + 0.666 + unaligned_66 = unaligned + 0.66 + with pytest.raises(ValueError): + arr1_66.astype(to_dtype, casting='same_value') + with pytest.raises(ValueError): + unaligned_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int_scalar(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + s1 = np.array(10, dtype=from_dtype) + assert s1.astype(to_dtype, casting='same_value') == 10 + + # Should raise, since values cannot round trip + s1_66 = s1 + 0.666 + with pytest.raises(ValueError): + s1_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("value", [np.nan, np.inf, -np.inf]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_naninf(self, value): + # These work, but may trigger FPE warnings on macOS + np.array([value], dtype=np.half).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.half).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.float32).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.half, casting='same_value') + np.array([value], dtype=np.complex64).astype(np.half, casting='same_value') + # These fail + with pytest.raises(ValueError): + np.array([value], dtype=np.half).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.complex64).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.float32).astype(np.int64, casting='same_value') + + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_complex(self): + arr = np.array([complex(1, 1)], dtype=np.cdouble) + # This works + arr.astype(np.complex64, casting='same_value') + # Casting with a non-zero imag part fails + with pytest.raises(ValueError): + arr.astype(np.float32, casting='same_value') + + def test_same_value_scalar(self): + i = np.array(123, dtype=np.int64) + f = np.array(123, dtype=np.float64) + assert i.astype(np.float64, casting='same_value') == f + assert f.astype(np.int64, casting='same_value') == f diff --git a/numpy/_core/tests/test_conversion_utils.py b/numpy/_core/tests/test_conversion_utils.py index 03ba33957821..067c2973c592 100644 --- a/numpy/_core/tests/test_conversion_utils.py +++ b/numpy/_core/tests/test_conversion_utils.py @@ -3,9 +3,9 @@ """ import re -import numpy._core._multiarray_tests as mt import pytest +import numpy._core._multiarray_tests as mt from numpy._core.multiarray import CLIP, RAISE, WRAP from numpy.testing import assert_raises @@ -172,9 +172,12 @@ def test_valid(self): self._check("no", "NPY_NO_CASTING") self._check("equiv", "NPY_EQUIV_CASTING") self._check("safe", "NPY_SAFE_CASTING") - self._check("same_kind", "NPY_SAME_KIND_CASTING") self._check("unsafe", "NPY_UNSAFE_CASTING") + self._check("same_kind", "NPY_SAME_KIND_CASTING") + def test_invalid(self): + # Currently, 'same_value' is supported only in ndarray.astype + self._check_value_error("same_value") class TestIntpConverter: """ Tests of PyArray_IntpConverter """ diff --git a/numpy/_core/tests/test_cpu_dispatcher.py 
b/numpy/_core/tests/test_cpu_dispatcher.py
index 0a47685d0397..04acf13c228d 100644
--- a/numpy/_core/tests/test_cpu_dispatcher.py
+++ b/numpy/_core/tests/test_cpu_dispatcher.py
@@ -1,10 +1,9 @@
+from numpy._core import _umath_tests
 from numpy._core._multiarray_umath import (
     __cpu_baseline__,
     __cpu_dispatch__,
     __cpu_features__,
 )
-
-from numpy._core import _umath_tests
 from numpy.testing import assert_equal


@@ -13,17 +12,17 @@ def test_dispatcher():
     Testing the utilities of the CPU dispatcher
     """
     targets = (
-        "SSE2", "SSE41", "AVX2",
+        "X86_V2", "X86_V3",
         "VSX", "VSX2", "VSX3",
         "NEON", "ASIMD", "ASIMDHP",
-        "VX", "VXE", "LSX"
+        "VX", "VXE", "LSX", "RVV"
     )
     highest_sfx = ""  # no suffix for the baseline
     all_sfx = []
     for feature in reversed(targets):
-        # skip baseline features, by the default `CCompilerOpt` do not generate separated objects
-        # for the baseline, just one object combined all of them via 'baseline' option
-        # within the configuration statements.
+        # Skip baseline features; by default `CCompilerOpt` does not generate
+        # separate objects for the baseline, just one object combining all of
+        # them via the 'baseline' option within the configuration statements.
         if feature in __cpu_baseline__:
             continue
         # check compiler and running machine support
diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py
index d1e3dc610d49..431fcb40b324 100644
--- a/numpy/_core/tests/test_cpu_features.py
+++ b/numpy/_core/tests/test_cpu_features.py
@@ -6,6 +6,7 @@
 import sys

 import pytest
+
 from numpy._core._multiarray_umath import (
     __cpu_baseline__,
     __cpu_dispatch__,
@@ -123,6 +124,7 @@ def load_flags_auxv(self):
         " therefore this test class cannot be properly executed."
     ),
 )
+@pytest.mark.thread_unsafe(reason="setup & tmp_path_factory thread-unsafe, modifies environment variables")
 class TestEnvPrivation:
     cwd = pathlib.Path(__file__).parent.resolve()
     env = os.environ.copy()
@@ -337,34 +339,35 @@ def test_impossible_feature_enable(self):
     not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
 )
 class Test_X86_Features(AbstractTest):
-    features = [
-        "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
-        "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
-        "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
-        "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
-        "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16",
-    ]
+    features = []
+
     features_groups = {
-        "AVX512_KNL": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
-        "AVX512_KNM": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
-                       "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
-        "AVX512_SKX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
-        "AVX512_CLX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
-        "AVX512_CNL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
-                       "AVX512VBMI"],
-        "AVX512_ICL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
-                       "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
-        "AVX512_SPR": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ",
-                       "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI",
-                       "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ",
-                       "AVX512FP16"],
+        "X86_V2": [
+            "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "SSE42",
+            "POPCNT", "LAHF", "CX16"
+        ],
     }
+    features_groups["X86_V3"] = features_groups["X86_V2"] + [
+        "AVX", "AVX2", "FMA3", "BMI", "BMI2",
+        "LZCNT", "F16C",
"MOVBE" + ] + features_groups["X86_V4"] = features_groups["X86_V3"] + [ + "AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL" + ] + features_groups["AVX512_ICL"] = features_groups["X86_V4"] + [ + "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", + "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", + "VAES", "VPCLMULQDQ", "GFNI" + ] + features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16", "AVX512BF16"] + features_map = { "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", + "BMI": "BMI1", "LZCNT": "ABM", "LAHF": "LAHF_LM", "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", "AVX512VBMI2": "AVX512_VBMI2", "AVX5124FMAPS": "AVX512_4FMAPS", "AVX5124VNNIW": "AVX512_4VNNIW", "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ", - "AVX512FP16": "AVX512_FP16", + "AVX512FP16": "AVX512_FP16", "AVX512BF16": "AVX512_BF16" } def load_flags(self): @@ -430,3 +433,18 @@ class Test_LOONGARCH_Features(AbstractTest): def load_flags(self): self.load_flags_cpuinfo("Features") + + +is_riscv = re.match(r"^(riscv)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_riscv, reason="Only for Linux and RISC-V") +class Test_RISCV_Features(AbstractTest): + features = ["RVV"] + + def load_flags(self): + self.load_flags_auxv() + if not self.features_flags: + # Let the test fail and dump if we cannot read HWCAP. + return + hwcap = int(next(iter(self.features_flags)), 16) + if hwcap & (1 << 21): # HWCAP_RISCV_V + self.features_flags.add("RVV") diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 66e6de35b427..2acb4adf4c7c 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -1,12 +1,12 @@ from tempfile import NamedTemporaryFile import pytest + +import numpy as np from numpy._core._multiarray_umath import ( _discover_array_parameters as discover_array_params, + _get_sfloat_dtype, ) -from numpy._core._multiarray_umath import _get_sfloat_dtype - -import numpy as np from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() @@ -231,6 +231,78 @@ def test_wrapped_and_wrapped_reductions(self): expected = np.hypot.reduce(float_equiv, keepdims=True) assert res.view(np.float64) * 2 == expected + def test_sort(self): + a = self._get_array(1.) + a = a[::-1] # reverse it + + a.sort() + assert_array_equal(a.view(np.float64), [1., 2., 3.]) + + a = self._get_array(1.) + a = a[::-1] # reverse it + + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [1., 2., 3.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) # different factor + a = a[::2][::-1] # non-contiguous + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=True) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=False) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + def test_argsort(self): + a = self._get_array(1.) 
+ a = a[::-1] # reverse it + + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) + a = a[::2][::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=True) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=False) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + def test_astype_class(self): # Very simple test that we accept `.astype()` also on the class. # ScaledFloat always returns the default descriptor, but it does @@ -255,6 +327,9 @@ def test_creation_class(self): assert np.zeros(3, dtype=SF).dtype == SF(1.) assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.) + @pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" + ) def test_np_save_load(self): # this monkeypatch is needed because pickle # uses the repr of a type to reconstruct it @@ -298,6 +373,9 @@ def test_flatiter_index(self, index): np.testing.assert_array_equal( arr.view(np.float64), arr2.view(np.float64)) +@pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" +) def test_type_pickle(): # can't actually unpickle, but we can pickle (if in namespace) import pickle diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index fb3839fd2685..c405a59e535e 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -345,7 +345,8 @@ def test_npystring_allocators_other_dtype(install_temp): assert checks.npystring_allocators_other_types(arr1, arr2) == 0 -@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') -def test_npy_uintp_type_enum(): +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64') +def test_npy_uintp_type_enum(install_temp): import checks assert checks.check_npy_uintp_type_enum() diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 1cbacb8a26a8..c7b11149ed43 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,5 +1,7 @@ import datetime import pickle +import warnings +from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import pytest @@ -12,22 +14,18 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) -# Use pytz to test out various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False - try: RecursionError except NameError: RecursionError = RuntimeError # python < 3.5 +try: + ZoneInfo("US/Central") + _has_tz = True +except ZoneInfoNotFoundError: + _has_tz = False def _assert_equal_hash(v1, v2): assert v1 == v2 @@ -266,10 +264,12 @@ def test_datetime_scalar_construction(self): # Some basic strings and repr assert_equal(str(np.datetime64('NaT')), 'NaT') assert_equal(repr(np.datetime64('NaT')), - "np.datetime64('NaT')") + 
"np.datetime64('NaT','generic')") assert_equal(str(np.datetime64('2011-02')), '2011-02') assert_equal(repr(np.datetime64('2011-02')), "np.datetime64('2011-02')") + assert_equal(repr(np.datetime64('NaT').astype(np.dtype("datetime64[ns]"))), + "np.datetime64('NaT','ns')") # None gets constructed as NaT assert_equal(np.datetime64(None), np.datetime64('NaT')) @@ -844,6 +844,21 @@ def test_timedelta_array_str(self): a = np.array([-1, 'NaT', 1234567], dtype=''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + with pytest.warns(np.exceptions.VisibleDeprecationWarning, + match=r".*align should be passed"): + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ + b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + + def test_gh_29555(self): + # check that dtype metadata round-trips when none + dt = np.dtype('>M8[us]') + assert dt.metadata is None + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + res = pickle.loads(pickle.dumps(dt, protocol=proto)) + assert_equal(res, dt) + assert res.metadata is None def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') - assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) + assert_raises(ValueError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, 1)) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_raises(TypeError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) def test_dtype_promotion(self): @@ -1279,8 +1307,9 @@ def test_datetime_multiply(self): assert_raises(TypeError, np.multiply, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in multiply") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in multiply", RuntimeWarning) nat = np.timedelta64('NaT') def check(a, b, res): @@ -1341,7 +1370,7 @@ def test_timedelta_floor_divide(self, op1, op2, exp): np.timedelta64(-1)), ]) def test_timedelta_floor_div_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = op1 // op2 assert_equal(actual, 0) assert_equal(actual.dtype, np.int64) @@ -1425,9 +1454,9 @@ def test_timedelta_divmod_typeerror(self, op1, op2): np.timedelta64(-1)), ]) def test_timedelta_divmod_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): expected = (op1 // op2, op1 % op2) - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = divmod(op1, op2) assert_equal(actual, expected) @@ -1479,8 +1508,9 @@ def test_datetime_divide(self): assert_raises(TypeError, np.divide, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, r".*encountered in divide") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', r".*encountered in divide", RuntimeWarning) nat = np.timedelta64('NaT') for tp in (int, float): assert_equal(np.timedelta64(1) / tp(0), nat) @@ -1840,6 +1870,10 @@ def test_datetime_as_string(self): 
'2032-07-18') assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'), '2032-07-18') + + with pytest.raises(ValueError): + np.datetime_as_string(a, unit='Y', casting='same_value') + assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12') assert_equal(np.datetime_as_string(a, unit='m'), '2032-07-18T12:23') @@ -1886,7 +1920,7 @@ def test_datetime_as_string(self): np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), '2032-01-01') - @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.") + @pytest.mark.skipif(not _has_tz, reason="The tzdata module is not available.") def test_datetime_as_string_timezone(self): # timezone='local' vs 'UTC' a = np.datetime64('2010-03-15T06:30', 'm') @@ -1901,29 +1935,29 @@ def test_datetime_as_string_timezone(self): b = np.datetime64('2010-02-15T06:30', 'm') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Central')), '2010-03-15T01:30-0500') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Eastern')), '2010-03-15T02:30-0400') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Pacific')), '2010-03-14T23:30-0700') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Central')), '2010-02-15T00:30-0600') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Eastern')), '2010-02-15T01:30-0500') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Pacific')), '2010-02-14T22:30-0800') # Dates to strings with a timezone attached is disabled by default assert_raises(TypeError, np.datetime_as_string, a, unit='D', - timezone=tz('US/Pacific')) + timezone=ZoneInfo('US/Pacific')) # Check that we can print out the date in the specified time zone assert_equal(np.datetime_as_string(a, unit='D', - timezone=tz('US/Pacific'), casting='unsafe'), + timezone=ZoneInfo('US/Pacific'), casting='unsafe'), '2010-03-14') assert_equal(np.datetime_as_string(b, unit='D', - timezone=tz('US/Central'), casting='unsafe'), + timezone=ZoneInfo('US/Central'), casting='unsafe'), '2010-02-15') def test_datetime_arange(self): @@ -2042,7 +2076,7 @@ def test_timedelta_modulus_error(self, val1, val2): @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_timedelta_modulus_div_by_zero(self): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = np.timedelta64(10, 's') % np.timedelta64(0, 's') assert_equal(actual, np.timedelta64('NaT')) @@ -2666,6 +2700,54 @@ def test_timedelta_hash_big_positive(self, wk, unit): td2 = np.timedelta64(td, unit) _assert_equal_hash(td, td2) + @pytest.mark.parametrize( + "inputs, divisor, expected", + [ + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.int64(2), + np.array( + [datetime.timedelta(seconds=10), datetime.timedelta(days=1)], + dtype="object", + ), + ), + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.timedelta64(2, "s"), + np.array( + [10.0, 24.0 * 60.0 * 60.0], + dtype="object", + ), + ), + ( + datetime.timedelta(seconds=2), + np.array( + [datetime.timedelta(seconds=20), 
datetime.timedelta(days=2)], + dtype="object", + ), + np.array( + [1.0 / 10.0, 1.0 / (24.0 * 60.0 * 60.0)], + dtype="object", + ), + ), + ], + ) + def test_true_divide_object_by_timedelta( + self, + inputs: np.ndarray | type[np.generic], + divisor: np.ndarray | type[np.generic], + expected: np.ndarray, + ): + # gh-30025 + results = inputs / divisor + assert_array_equal(results, expected) + class TestDateTimeData: diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index 2607953a940a..e98632b62829 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -134,63 +134,67 @@ def fail(): assert_raises(ValueError, fail) - class TestWhitespace: - def setup_method(self): - self.A = np.array([['abc ', '123 '], - ['789 ', 'xyz ']]).view(np.char.chararray) - self.B = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - def test1(self): - assert_(np.all(self.A == self.B)) - assert_(np.all(self.A >= self.B)) - assert_(np.all(self.A <= self.B)) - assert_(not np.any(self.A > self.B)) - assert_(not np.any(self.A < self.B)) - assert_(not np.any(self.A != self.B)) + A = np.array([['abc ', '123 '], + ['789 ', 'xyz ']]).view(np.char.chararray) + B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + assert_(np.all(A == B)) + assert_(np.all(A >= B)) + assert_(np.all(A <= B)) + assert_(not np.any(A > B)) + assert_(not np.any(A < B)) + assert_(not np.any(A != B)) class TestChar: - def setup_method(self): - self.A = np.array('abc1', dtype='c').view(np.char.chararray) - def test_it(self): - assert_equal(self.A.shape, (4,)) - assert_equal(self.A.upper()[:2].tobytes(), b'AB') + A = np.array('abc1', dtype='c').view(np.char.chararray) + assert_equal(A.shape, (4,)) + assert_equal(A.upper()[:2].tobytes(), b'AB') class TestComparisons: - def setup_method(self): - self.A = np.array([['abc', 'abcc', '123'], - ['789', 'abc', 'xyz']]).view(np.char.chararray) - self.B = np.array([['efg', 'efg', '123 '], - ['051', 'efgg', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']]).view(np.char.chararray) def test_not_equal(self): - assert_array_equal((self.A != self.B), + A, B = self.A(), self.B() + assert_array_equal((A != B), [[True, True, False], [True, True, True]]) def test_equal(self): - assert_array_equal((self.A == self.B), + A, B = self.A(), self.B() + assert_array_equal((A == B), [[False, False, True], [False, False, False]]) def test_greater_equal(self): - assert_array_equal((self.A >= self.B), + A, B = self.A(), self.B() + assert_array_equal((A >= B), [[False, False, True], [True, False, True]]) def test_less_equal(self): - assert_array_equal((self.A <= self.B), + A, B = self.A(), self.B() + assert_array_equal((A <= B), [[True, True, True], [False, True, False]]) def test_greater(self): - assert_array_equal((self.A > self.B), + A, B = self.A(), self.B() + assert_array_equal((A > B), [[False, False, False], [True, False, True]]) def test_less(self): - assert_array_equal((self.A < self.B), + A, B = self.A(), self.B() + assert_array_equal((A < B), [[True, True, False], [False, True, False]]) def test_type(self): - out1 = np.char.equal(self.A, self.B) + A, B = self.A(), self.B() + out1 = np.char.equal(A, B) out2 = np.char.equal('a', 'a') assert_(isinstance(out1, np.ndarray)) assert_(isinstance(out2, np.ndarray)) @@ -198,59 +202,56 @@ 
def test_type(self): class TestComparisonsMixed1(TestComparisons): """Ticket #1276""" - def setup_method(self): - TestComparisons.setup_method(self) - self.B = np.array( + def B(self): + return np.array( [['efg', 'efg', '123 '], ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) class TestComparisonsMixed2(TestComparisons): """Ticket #1276""" - def setup_method(self): - TestComparisons.setup_method(self) - self.A = np.array( + def A(self): + return np.array( [['abc', 'abcc', '123'], ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray) class TestInformation: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - # Array with longer strings, > MEMCHR_CUT_OFF in code. - self.C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', - '01234567890123456789012345']) - .view(np.char.chararray)) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_len(self): - assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) - assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) - assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + A, B = self.A(), self.B() + assert_(issubclass(np.char.str_len(A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(B), [[3, 0], [5, 9], [12, 5]]) def test_count(self): - assert_(issubclass(self.A.count('').dtype.type, np.integer)) - assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) - assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + A, B = self.A(), self.B() + assert_(issubclass(A.count('').dtype.type, np.integer)) + assert_array_equal(A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(A.count('123'), [[0, 0], [1, 0], [1, 0]]) # Python doesn't seem to like counting NULL characters - # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) - assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) - # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(B.count('123'), [[0, 0], [1, 0], [1, 0]]) def test_endswith(self): - assert_(issubclass(self.A.endswith('').dtype.type, np.bool)) - assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.endswith('').dtype.type, np.bool)) + assert_array_equal(A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.endswith('3', 'fdjk') + A.endswith('3', 'fdjk') assert_raises(TypeError, fail) @@ -260,7 +261,7 @@ def fail(): ("S", lambda x: x.encode('ascii')), ]) def test_find(self, dtype, encode): - A = self.A.astype(dtype) + A = self.A().astype(dtype) assert_(issubclass(A.find(encode('a')).dtype.type, np.integer)) 
assert_array_equal(A.find(encode('a')), [[1, -1], [-1, 6], [-1, -1]]) @@ -270,103 +271,119 @@ def test_find(self, dtype, encode): [[1, -1], [-1, -1], [-1, -1]]) assert_array_equal(A.find([encode('1'), encode('P')]), [[-1, -1], [0, -1], [0, 1]]) - C = self.C.astype(dtype) + C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', + '01234567890123456789012345']) + .view(np.char.chararray)).astype(dtype) assert_array_equal(C.find(encode('M')), [12, -1]) def test_index(self): + A = self.A() def fail(): - self.A.index('a') + A.index('a') assert_raises(ValueError, fail) assert_(np.char.index('abcba', 'b') == 1) assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) def test_isalnum(self): - assert_(issubclass(self.A.isalnum().dtype.type, np.bool)) - assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalnum().dtype.type, np.bool)) + assert_array_equal(A.isalnum(), [[False, False], [True, True], [False, True]]) def test_isalpha(self): - assert_(issubclass(self.A.isalpha().dtype.type, np.bool)) - assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalpha().dtype.type, np.bool)) + assert_array_equal(A.isalpha(), [[False, False], [False, True], [False, True]]) def test_isdigit(self): - assert_(issubclass(self.A.isdigit().dtype.type, np.bool)) - assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isdigit().dtype.type, np.bool)) + assert_array_equal(A.isdigit(), [[False, False], [True, False], [False, False]]) def test_islower(self): - assert_(issubclass(self.A.islower().dtype.type, np.bool)) - assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.islower().dtype.type, np.bool)) + assert_array_equal(A.islower(), [[True, False], [False, False], [False, False]]) def test_isspace(self): - assert_(issubclass(self.A.isspace().dtype.type, np.bool)) - assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isspace().dtype.type, np.bool)) + assert_array_equal(A.isspace(), [[False, False], [False, False], [False, False]]) def test_istitle(self): - assert_(issubclass(self.A.istitle().dtype.type, np.bool)) - assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.istitle().dtype.type, np.bool)) + assert_array_equal(A.istitle(), [[False, False], [False, False], [False, False]]) def test_isupper(self): - assert_(issubclass(self.A.isupper().dtype.type, np.bool)) - assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + A = self.A() + assert_(issubclass(A.isupper().dtype.type, np.bool)) + assert_array_equal(A.isupper(), [[False, False], [False, False], [False, True]]) def test_rfind(self): - assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) - assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) - assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + A = self.A() + assert_(issubclass(A.rfind('a').dtype.type, np.integer)) + assert_array_equal(A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + 
assert_array_equal(A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) def test_rindex(self): + A = self.A() def fail(): - self.A.rindex('a') + A.rindex('a') assert_raises(ValueError, fail) assert_(np.char.rindex('abcba', 'b') == 3) assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) def test_startswith(self): - assert_(issubclass(self.A.startswith('').dtype.type, np.bool)) - assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.startswith('').dtype.type, np.bool)) + assert_array_equal(A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.startswith('3', 'fdjk') + A.startswith('3', 'fdjk') assert_raises(TypeError, fail) - class TestMethods: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']], - dtype='S').view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]).view( - np.char.chararray) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_capitalize(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_)) - assert_array_equal(self.A.capitalize(), tgt) + assert_(issubclass(A.capitalize().dtype.type, np.bytes_)) + assert_array_equal(A.capitalize(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.capitalize().dtype.type, np.str_)) - assert_array_equal(self.B.capitalize(), tgt) + assert_(issubclass(B.capitalize().dtype.type, np.str_)) + assert_array_equal(B.capitalize(), tgt) def test_center(self): - assert_(issubclass(self.A.center(10).dtype.type, np.bytes_)) - C = self.A.center([10, 20]) + A = self.A() + assert_(issubclass(A.center(10).dtype.type, np.bytes_)) + C = A.center([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.center(20, b'#') + C = A.center(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_(np.all(C.endswith(b'#'))) @@ -381,17 +398,17 @@ def test_decode(self): assert_(A.decode('unicode-escape')[0] == '\u03a3') def test_encode(self): - B = self.B.encode('unicode_escape') + B = self.B().encode('unicode_escape') assert_(B[0][0] == ' \\u03a3 '.encode('latin1')) def test_expandtabs(self): - T = self.A.expandtabs() + T = self.A().expandtabs() assert_(T[2, 0] == b'123 345 \0') def test_join(self): # NOTE: list(b'123') == [49, 50, 51] # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') + A0 = self.A().decode('ascii') A = np.char.join([',', '#'], A0) assert_(issubclass(A.dtype.type, np.str_)) @@ -401,12 +418,13 @@ def test_join(self): assert_array_equal(np.char.join([',', '#'], A0), tgt) def test_ljust(self): - assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.ljust(10).dtype.type, np.bytes_)) - C = self.A.ljust([10, 20]) + C = A.ljust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 
20]]) - C = self.A.ljust(20, b'#') + C = A.ljust(20, b'#') assert_array_equal(C.startswith(b'#'), [ [False, True], [False, False], [False, False]]) assert_(np.all(C.endswith(b'#'))) @@ -418,38 +436,41 @@ def test_ljust(self): assert_array_equal(C, tgt) def test_lower(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'mixedcase'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.lower().dtype.type, np.bytes_)) - assert_array_equal(self.A.lower(), tgt) + assert_(issubclass(A.lower().dtype.type, np.bytes_)) + assert_array_equal(A.lower(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mixedcase'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.lower().dtype.type, np.str_)) - assert_array_equal(self.B.lower(), tgt) + assert_(issubclass(B.lower().dtype.type, np.str_)) + assert_array_equal(B.lower(), tgt) def test_lstrip(self): + A, B = self.A(), self.B() tgt = [[b'abc ', b''], [b'12345', b'MixedCase'], [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_)) - assert_array_equal(self.A.lstrip(), tgt) + assert_(issubclass(A.lstrip().dtype.type, np.bytes_)) + assert_array_equal(A.lstrip(), tgt) tgt = [[b' abc', b''], [b'2345', b'ixedCase'], [b'23 \t 345 \x00', b'UPPER']] - assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + assert_array_equal(A.lstrip([b'1', b'M']), tgt) tgt = [['\u03a3 ', ''], ['12345', 'MixedCase'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.lstrip().dtype.type, np.str_)) - assert_array_equal(self.B.lstrip(), tgt) + assert_(issubclass(B.lstrip().dtype.type, np.str_)) + assert_array_equal(B.lstrip(), tgt) def test_partition(self): - P = self.A.partition([b'3', b'M']) + A = self.A() + P = A.partition([b'3', b'M']) tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] @@ -457,7 +478,8 @@ def test_partition(self): assert_array_equal(P, tgt) def test_replace(self): - R = self.A.replace([b'3', b'a'], + A = self.A() + R = A.replace([b'3', b'a'], [b'##########', b'@']) tgt = [[b' abc ', b''], [b'12##########45', b'MixedC@se'], @@ -466,14 +488,14 @@ def test_replace(self): assert_array_equal(R, tgt) # Test special cases that should just return the input array, # since replacements are not possible or do nothing. 
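# The recurring refactor in this file: state that setup_method used to share
# on self (which one test could mutate for the next) becomes a factory method
# returning a fresh array per call, keeping tests independent when run in
# parallel. A hypothetical minimal illustration of the pattern:
import numpy as np

class TestExample:
    def A(self):
        # fresh chararray per call, instead of a shared self.A from setup_method
        return np.array(['abc', '123']).view(np.char.chararray)

    def test_upper(self):
        assert (self.A().upper() == np.array(['ABC', '123'])).all()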
- S1 = self.A.replace(b'A very long byte string, longer than A', b'') - assert_array_equal(S1, self.A) - S2 = self.A.replace(b'', b'') - assert_array_equal(S2, self.A) - S3 = self.A.replace(b'3', b'3') - assert_array_equal(S3, self.A) - S4 = self.A.replace(b'3', b'', count=0) - assert_array_equal(S4, self.A) + S1 = A.replace(b'A very long byte string, longer than A', b'') + assert_array_equal(S1, A) + S2 = A.replace(b'', b'') + assert_array_equal(S2, A) + S3 = A.replace(b'3', b'3') + assert_array_equal(S3, A) + S4 = A.replace(b'3', b'', count=0) + assert_array_equal(S4, A) def test_replace_count_and_size(self): a = np.array(['0123456789' * i for i in range(4)] @@ -510,12 +532,13 @@ def test_replace_broadcasting(self): assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X'])) def test_rjust(self): - assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.rjust(10).dtype.type, np.bytes_)) - C = self.A.rjust([10, 20]) + C = A.rjust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.rjust(20, b'#') + C = A.rjust(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_array_equal(C.endswith(b'#'), [[False, True], [False, False], [False, False]]) @@ -527,7 +550,8 @@ def test_rjust(self): assert_array_equal(C, tgt) def test_rpartition(self): - P = self.A.rpartition([b'3', b'M']) + A = self.A() + P = A.rpartition([b'3', b'M']) tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] @@ -535,7 +559,7 @@ def test_rpartition(self): assert_array_equal(P, tgt) def test_rsplit(self): - A = self.A.rsplit(b'3') + A = self.A().rsplit(b'3') tgt = [[[b' abc '], [b'']], [[b'12', b'45'], [b'MixedCase']], [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] @@ -543,45 +567,47 @@ def test_rsplit(self): assert_equal(A.tolist(), tgt) def test_rstrip(self): - assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_)) + A, B = self.A(), self.B() + assert_(issubclass(A.rstrip().dtype.type, np.bytes_)) tgt = [[b' abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_array_equal(self.A.rstrip(), tgt) + assert_array_equal(A.rstrip(), tgt) tgt = [[b' abc ', b''], [b'1234', b'MixedCase'], [b'123 \t 345 \x00', b'UPP'] ] - assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + assert_array_equal(A.rstrip([b'5', b'ER']), tgt) tgt = [[' \u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.rstrip().dtype.type, np.str_)) - assert_array_equal(self.B.rstrip(), tgt) + assert_(issubclass(B.rstrip().dtype.type, np.str_)) + assert_array_equal(B.rstrip(), tgt) def test_strip(self): + A, B = self.A(), self.B() tgt = [[b'abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_(issubclass(self.A.strip().dtype.type, np.bytes_)) - assert_array_equal(self.A.strip(), tgt) + assert_(issubclass(A.strip().dtype.type, np.bytes_)) + assert_array_equal(A.strip(), tgt) tgt = [[b' abc ', b''], [b'234', b'ixedCas'], [b'23 \t 345 \x00', b'UPP']] - assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + assert_array_equal(A.strip([b'15', b'EReM']), tgt) tgt = [['\u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.strip().dtype.type, np.str_)) - assert_array_equal(self.B.strip(), tgt) + assert_(issubclass(B.strip().dtype.type, np.str_)) + assert_array_equal(B.strip(), tgt) def test_split(self): - A = self.A.split(b'3') + A = self.A().split(b'3') tgt = [ [[b' abc '], 
[b'']], [[b'12', b'45'], [b'MixedCase']], @@ -596,90 +622,103 @@ def test_splitlines(self): assert_(len(A[0]) == 3) def test_swapcase(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'mIXEDcASE'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_)) - assert_array_equal(self.A.swapcase(), tgt) + assert_(issubclass(A.swapcase().dtype.type, np.bytes_)) + assert_array_equal(A.swapcase(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mIXEDcASE'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.swapcase().dtype.type, np.str_)) - assert_array_equal(self.B.swapcase(), tgt) + assert_(issubclass(B.swapcase().dtype.type, np.str_)) + assert_array_equal(B.swapcase(), tgt) def test_title(self): + A, B = self.A(), self.B() tgt = [[b' Abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.title().dtype.type, np.bytes_)) - assert_array_equal(self.A.title(), tgt) + assert_(issubclass(A.title().dtype.type, np.bytes_)) + assert_array_equal(A.title(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.title().dtype.type, np.str_)) - assert_array_equal(self.B.title(), tgt) + assert_(issubclass(B.title().dtype.type, np.str_)) + assert_array_equal(B.title(), tgt) def test_upper(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'MIXEDCASE'], [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.upper().dtype.type, np.bytes_)) - assert_array_equal(self.A.upper(), tgt) + assert_(issubclass(A.upper().dtype.type, np.bytes_)) + assert_array_equal(A.upper(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'MIXEDCASE'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.upper().dtype.type, np.str_)) - assert_array_equal(self.B.upper(), tgt) + assert_(issubclass(B.upper().dtype.type, np.str_)) + assert_array_equal(B.upper(), tgt) def test_isnumeric(self): + A, B = self.A(), self.B() def fail(): - self.A.isnumeric() + A.isnumeric() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isnumeric().dtype.type, np.bool)) - assert_array_equal(self.B.isnumeric(), [ + assert_(issubclass(B.isnumeric().dtype.type, np.bool)) + assert_array_equal(B.isnumeric(), [ [False, False], [True, False], [False, False]]) def test_isdecimal(self): + A, B = self.A(), self.B() def fail(): - self.A.isdecimal() + A.isdecimal() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isdecimal().dtype.type, np.bool)) - assert_array_equal(self.B.isdecimal(), [ + assert_(issubclass(B.isdecimal().dtype.type, np.bool)) + assert_array_equal(B.isdecimal(), [ [False, False], [True, False], [False, False]]) - class TestOperations: - def setup_method(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - self.B = np.array([['efg', '456'], - ['051', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', '456'], + ['051', 'tuv']]).view(np.char.chararray) + + def test_argsort(self): + arr = np.array(['abc'] * 4).view(np.char.chararray) + actual = arr.argsort(stable=True) + assert_array_equal(actual, [0, 1, 2, 3]) def test_add(self): + A, B = self.A(), self.B() AB = np.array([['abcefg', '123456'], ['789051', 'xyztuv']]).view(np.char.chararray) - assert_array_equal(AB, (self.A + self.B)) - assert_(len((self.A + self.B)[0][0]) == 6) + assert_array_equal(AB, (A + B)) + assert_(len((A + B)[0][0]) == 6) def 
test_radd(self): + A = self.A() QA = np.array([['qabc', 'q123'], ['q789', 'qxyz']]).view(np.char.chararray) - assert_array_equal(QA, ('q' + self.A)) + assert_array_equal(QA, ('q' + A)) def test_mul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): Ar = np.array([[A[0, 0] * r, A[0, 1] * r], [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) - assert_array_equal(Ar, (self.A * r)) + assert_array_equal(Ar, (A * r)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, @@ -687,11 +726,11 @@ def test_mul(self): A * ob def test_rmul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): Ar = np.array([[A[0, 0] * r, A[0, 1] * r], [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) - assert_array_equal(Ar, (r * self.A)) + assert_array_equal(Ar, (r * A)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, @@ -716,13 +755,14 @@ def test_mod(self): assert_array_equal(A2, (A % [[1, 2], [3, 4]])) def test_rmod(self): - assert_(f"{self.A}" == str(self.A)) - assert_(f"{self.A!r}" == repr(self.A)) + A = self.A() + assert_(f"{A}" == str(A)) + assert_(f"{A!r}" == repr(A)) for ob in [42, object()]: with assert_raises_regex( TypeError, "unsupported operand type.* and 'chararray'"): - ob % self.A + ob % A def test_slice(self): """Regression test for https://github.com/numpy/numpy/issues/5982""" @@ -751,27 +791,21 @@ def test_getitem_length_zero_item(self, data): # or does not have length 0. assert_equal(a[1], a.dtype.type()) - class TestMethodsEmptyArray: - def setup_method(self): - self.U = np.array([], dtype='U') - self.S = np.array([], dtype='S') - def test_encode(self): - res = np.char.encode(self.U) + res = np.char.encode(np.array([], dtype='U')) assert_array_equal(res, []) assert_(res.dtype.char == 'S') def test_decode(self): - res = np.char.decode(self.S) + res = np.char.decode(np.array([], dtype='S')) assert_array_equal(res, []) assert_(res.dtype.char == 'U') def test_decode_with_reshape(self): - res = np.char.decode(self.S.reshape((1, 0, 1))) + res = np.char.decode(np.array([], dtype='S').reshape((1, 0, 1))) assert_(res.shape == (1, 0, 1)) - class TestMethodsScalarValues: def test_mod(self): A = np.array([[' abc ', ''], @@ -816,7 +850,6 @@ def test_replace(self): assert_equal(np.char.replace('Python is good', 'good', 'great'), 'Python is great') - def test_empty_indexing(): """Regression test for ticket 1948.""" # Check that indexing a chararray with an empty list/array returns an diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index d90c15565c22..7cb1fee9b890 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -6,18 +6,12 @@ import contextlib import warnings -import numpy._core._struct_ufunc_tests as struct_ufunc import pytest -from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 import numpy as np -from numpy.testing import assert_raises, temppath - -try: - import pytz # noqa: F401 - _has_pytz = True -except ImportError: - _has_pytz = False +import numpy._core._struct_ufunc_tests as struct_ufunc +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 +from numpy.testing import assert_raises class _DeprecationTestCase: @@ -26,22 +20,20 @@ class _DeprecationTestCase: message = '' warning_cls = DeprecationWarning - def setup_method(self): - self.warn_ctx = warnings.catch_warnings(record=True) - self.log = self.warn_ctx.__enter__() - - # Do *not* ignore other DeprecationWarnings. 
Ignoring warnings - # can give very confusing results because of - # https://bugs.python.org/issue4180 and it is probably simplest to - # try to keep the tests cleanly giving only the right warning type. - # (While checking them set to "error" those are ignored anyway) - # We still have them show up, because otherwise they would be raised - warnings.filterwarnings("always", category=self.warning_cls) - warnings.filterwarnings("always", message=self.message, - category=self.warning_cls) - - def teardown_method(self): - self.warn_ctx.__exit__() + @contextlib.contextmanager + def filter_warnings(self): + with warnings.catch_warnings(record=True) as w: + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. + # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + yield w + return def assert_deprecated(self, function, num=1, ignore_others=False, function_fails=False, @@ -78,9 +70,6 @@ def assert_deprecated(self, function, num=1, ignore_others=False, """ __tracebackhide__ = True # Hide traceback for py.test - # reset the log - self.log[:] = [] - if exceptions is np._NoValue: exceptions = (self.warning_cls,) @@ -89,11 +78,12 @@ def assert_deprecated(self, function, num=1, ignore_others=False, else: context_manager = contextlib.nullcontext() with context_manager: - function(*args, **kwargs) + with self.filter_warnings() as w_context: + function(*args, **kwargs) # just in case, clear the registry num_found = 0 - for warning in self.log: + for warning in w_context: if warning.category is self.warning_cls: num_found += 1 elif not ignore_others: @@ -101,8 +91,8 @@ def assert_deprecated(self, function, num=1, ignore_others=False, "expected %s but got: %s" % (self.warning_cls.__name__, warning.category)) if num is not None and num_found != num: - msg = f"{len(self.log)} warnings found but {num} expected." - lst = [str(w) for w in self.log] + msg = f"{len(w_context)} warnings found but {num} expected." 
+ lst = [str(w) for w in w_context] raise AssertionError("\n".join([msg] + lst)) with warnings.catch_warnings(): @@ -137,7 +127,6 @@ class _VisibleDeprecationTestCase(_DeprecationTestCase): class TestTestDeprecated: def test_assert_deprecated(self): test_case_instance = _DeprecationTestCase() - test_case_instance.setup_method() assert_raises(AssertionError, test_case_instance.assert_deprecated, lambda: None) @@ -146,7 +135,6 @@ def foo(): warnings.warn("foo", category=DeprecationWarning, stacklevel=2) test_case_instance.assert_deprecated(foo) - test_case_instance.teardown_method() class TestBincount(_DeprecationTestCase): @@ -157,12 +145,6 @@ def test_bincount_bad_list(self, badlist): self.assert_deprecated(lambda: np.bincount(badlist)) -class TestGeneratorSum(_DeprecationTestCase): - # 2018-02-25, 1.15.0 - def test_generator_sum(self): - self.assert_deprecated(np.sum, args=((i for i in range(5)),)) - - class BuiltInRoundComplexDType(_DeprecationTestCase): # 2020-03-31 1.19.0 deprecated_types = [np.csingle, np.cdouble, np.clongdouble] @@ -209,64 +191,13 @@ def test_not_deprecated(self): class TestCtypesGetter(_DeprecationTestCase): - # Deprecated 2021-05-18, Numpy 1.21.0 - warning_cls = DeprecationWarning ctypes = np.array([1]).ctypes - @pytest.mark.parametrize( - "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"] - ) - def test_deprecated(self, name: str) -> None: - func = getattr(self.ctypes, name) - self.assert_deprecated(func) - - @pytest.mark.parametrize( - "name", ["data", "shape", "strides", "_as_parameter_"] - ) + @pytest.mark.parametrize("name", ["data", "shape", "strides", "_as_parameter_"]) def test_not_deprecated(self, name: str) -> None: self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) -class TestMachAr(_DeprecationTestCase): - # Deprecated 2022-11-22, NumPy 1.25 - warning_cls = DeprecationWarning - - def test_deprecated_module(self): - self.assert_deprecated(lambda: np._core.MachAr) - - -class TestQuantileInterpolationDeprecation(_DeprecationTestCase): - # Deprecated 2021-11-08, NumPy 1.22 - @pytest.mark.parametrize("func", - [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) - def test_deprecated(self, func): - self.assert_deprecated( - lambda: func([0., 1.], 0., interpolation="linear")) - self.assert_deprecated( - lambda: func([0., 1.], 0., interpolation="nearest")) - - @pytest.mark.parametrize("func", - [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) - def test_both_passed(self, func): - with warnings.catch_warnings(): - # catch the DeprecationWarning so that it does not raise: - warnings.simplefilter("always", DeprecationWarning) - with pytest.raises(TypeError): - func([0., 1.], 0., interpolation="nearest", method="nearest") - - -class TestScalarConversion(_DeprecationTestCase): - # 2023-01-02, 1.25.0 - def test_float_conversion(self): - self.assert_deprecated(float, args=(np.array([3.14]),)) - - def test_behaviour(self): - b = np.array([[3.14]]) - c = np.zeros(5) - with pytest.warns(DeprecationWarning): - c[0] = b - - class TestPyIntConversion(_DeprecationTestCase): message = r".*stop allowing conversion of out-of-bound.*" @@ -343,9 +274,8 @@ def test_deprecated_np_lib_math(self): class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): - from numpy import in1d, row_stack, trapz + from numpy import row_stack from numpy._core.numerictypes import maximum_sctype - from numpy.lib._function_base_impl import disp from numpy.lib._npyio_impl import 
recfromcsv, recfromtxt from numpy.lib._shape_base_impl import get_array_wrap from numpy.lib._utils_impl import safe_eval @@ -358,25 +288,20 @@ def test_lib_functions_deprecation_call(self): self.assert_deprecated(lambda: recfromcsv(data_gen())) self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) - self.assert_deprecated(lambda: disp("test")) self.assert_deprecated(get_array_wrap) self.assert_deprecated(lambda: maximum_sctype(int)) - self.assert_deprecated(lambda: in1d([1], [1])) self.assert_deprecated(lambda: row_stack([[]])) - self.assert_deprecated(lambda: trapz([1], [1])) self.assert_deprecated(lambda: np.chararray) class TestDeprecatedDTypeAliases(_DeprecationTestCase): def _check_for_warning(self, func): - with warnings.catch_warnings(record=True) as caught_warnings: + with pytest.warns(DeprecationWarning, + match="alias 'a' was deprecated in NumPy 2.0") as w: func() - assert len(caught_warnings) == 1 - w = caught_warnings[0] - assert w.category is DeprecationWarning - assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) + assert len(w) == 1 def test_a_dtype_alias(self): for dtype in ["a", "a10"]: @@ -412,6 +337,13 @@ def __array_wrap__(self, arr): self.assert_deprecated(lambda: np.negative(test2)) assert test2.called +class TestDeprecatedArrayAttributeSetting(_DeprecationTestCase): + message = "Setting the .*on a NumPy array has been deprecated.*" + + def test_deprecated_strides_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) + class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): message = "Passing in a parenthesized single number" @@ -421,34 +353,108 @@ def test_parenthesized_repeat_count(self, string): self.assert_deprecated(np.dtype, args=(string,)) -class TestDeprecatedSaveFixImports(_DeprecationTestCase): - # Deprecated in Numpy 2.1, 2024-05 - message = "The 'fix_imports' flag is deprecated and has no effect." - - def test_deprecated(self): - with temppath(suffix='.npy') as path: - sample_args = (path, np.array(np.zeros((1024, 10)))) - self.assert_not_deprecated(np.save, args=sample_args) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': False}) - for allow_pickle in [True, False]: - self.assert_not_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': False}) - - class TestAddNewdocUFunc(_DeprecationTestCase): # Deprecated in Numpy 2.2, 2024-11 + @pytest.mark.thread_unsafe( + reason="modifies and checks docstring which is global state" + ) def test_deprecated(self): + doc = struct_ufunc.add_triplet.__doc__ + # gh-26718 + # This test mutates the C-level docstring pointer for add_triplet, + # which is permanent once set. Skip when re-running tests. + if doc is not None and "new docs" in doc: + pytest.skip("Cannot retest deprecation, otherwise ValueError: " + "Cannot change docstring of ufunc with non-NULL docstring") self.assert_deprecated( lambda: np._core.umath._add_newdoc_ufunc( struct_ufunc.add_triplet, "new docs" ) ) + + +class TestDTypeAlignBool(_VisibleDeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + # NOTE: As you can see, finalizing this deprecation breaks some (very) old + # pickle files. 
This may be fine, but needs to be done with some care since + # it breaks all of them and not just some. + # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) + message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " + + def test_deprecated(self): + # in particular integers should be rejected because one may think they mean + # alignment, or pass them accidentally as a subarray shape (meaning to pass + # a tuple). + self.assert_deprecated(lambda: np.dtype("f8", align=3)) + + @pytest.mark.parametrize("align", [True, False, np.True_, np.False_]) + def test_not_deprecated(self, align): + # if the user passes a bool, it is accepted. + self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) + + +class TestFlatiterIndexing0dBoolIndex(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + message = r"Indexing flat iterators with a 0-dimensional boolean index" + + def test_0d_boolean_index_deprecated(self): + arr = np.arange(3) + # 0d boolean indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[True]) + + def test_0d_boolean_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[True] = 10 + + self.assert_deprecated(assign_to_index) + + +class TestFlatiterIndexingFloatIndex(_DeprecationTestCase): + # Deprecated in NumPy 2.4, 2025-07 + message = r"Invalid non-array indices for iterator objects" + + def test_float_index_deprecated(self): + arr = np.arange(3) + # float indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[[1.]]) + + def test_float_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[[1.]] = 10 + + self.assert_deprecated(assign_to_index) + + +@pytest.mark.thread_unsafe( + reason="warning control utilities are deprecated due to being thread-unsafe" +) +class TestWarningUtilityDeprecations(_DeprecationTestCase): + # Deprecation in NumPy 2.4, 2025-08 + message = r"NumPy warning suppression and assertion utilities are deprecated." 
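# The migration these tests enforce, side by side: the deprecated,
# thread-unsafe NumPy helpers versus the stdlib/pytest forms that the rest of
# this diff switches other test files to. A small sketch (dividing a float
# array by zero emits a RuntimeWarning):
import warnings
import numpy as np
import pytest

with pytest.warns(RuntimeWarning):        # replaces np.testing.assert_warns
    np.array([1.0]) / 0.0

with warnings.catch_warnings():           # replaces np.testing.suppress_warnings
    warnings.filterwarnings(
        'ignore', message='divide by zero', category=RuntimeWarning)
    np.array([1.0]) / 0.0                 # warning silenced inside this block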
+ + def test_assert_warns_deprecated(self): + def use_assert_warns(): + with np.testing.assert_warns(RuntimeWarning): + warnings.warn("foo", RuntimeWarning, stacklevel=1) + + self.assert_deprecated(use_assert_warns) + + def test_suppress_warnings_deprecated(self): + def use_suppress_warnings(): + with np.testing.suppress_warnings() as sup: + sup.filter(RuntimeWarning, 'invalid value encountered in divide') + + self.assert_deprecated(use_suppress_warnings) + + +class TestTooManyArgsExtremum(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-08, gh-27639 + message = "Passing more than 2 positional arguments to np.maximum and np.minimum " + + @pytest.mark.parametrize("ufunc", [np.minimum, np.maximum]) + def test_extremem_3_args(self, ufunc): + self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1))) diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index 89c24032b6c1..e8198ac1823e 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -184,7 +184,7 @@ def test_device(self): np.from_dlpack(x, device="cpu") np.from_dlpack(x, device=None) - with pytest.raises(ValueError): + with pytest.raises(BufferError): x.__dlpack__(dl_device=(10, 0)) with pytest.raises(ValueError): np.from_dlpack(x, device="gpu") diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 684672a9b71f..b1f965d5164b 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1,8 +1,9 @@ +import contextlib import ctypes import gc +import inspect import operator import pickle -import random import sys import types from itertools import permutations @@ -11,20 +12,21 @@ import hypothesis import pytest from hypothesis.extra import numpy as hynp -from numpy._core._multiarray_tests import create_custom_field_dtype -from numpy._core._rational_tests import rational import numpy as np import numpy.dtypes +from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational from numpy.testing import ( HAS_REFCOUNT, - IS_PYSTON, - IS_WASM, + IS_64BIT, + IS_PYPY, assert_, assert_array_equal, assert_equal, assert_raises, ) +from numpy.testing._private.utils import requires_deep_recursion def assert_dtype_equal(a, b): @@ -210,7 +212,7 @@ def test_field_order_equality(self): 'formats': ['i4', 'f4'], 'offsets': [4, 0]}) assert_equal(x == y, False) - # This is an safe cast (not equiv) due to the different names: + # This is a safe cast (not equiv) due to the different names: assert np.can_cast(x, y, casting="safe") @pytest.mark.parametrize( @@ -975,25 +977,24 @@ def test1(self): ('yi', np.dtype((a, (3, 2))))]) assert_dtype_equal(c, d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_list_recursion(self): l = [] l.append(('f', l)) with pytest.raises(RecursionError): np.dtype(l) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_tuple_recursion(self): d = np.int32 for i in range(100000): d = (d, (1,)) - with pytest.raises(RecursionError): + # depending on OS and Python version, this might succeed + # see gh-30370 and cpython issue #142253 + with contextlib.suppress(RecursionError): np.dtype(d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - 
@pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_dict_recursion(self): d = {"names": ['self'], "formats": [None], "offsets": [0]} d['formats'][0] = d @@ -1327,8 +1328,9 @@ def test_make_canonical_hypothesis(self, dtype): @hypothesis.given( dtype=hypothesis.extra.numpy.array_dtypes( subtype_strategy=hypothesis.extra.numpy.array_dtypes(), - min_size=5, max_size=10, allow_subarrays=True)) - def test_structured(self, dtype): + min_size=5, max_size=10, allow_subarrays=True), + random=hypothesis.strategies.randoms()) + def test_structured(self, dtype, random): # Pick 4 of the fields at random. This will leave empty space in the # dtype (since we do not canonicalize it here). field_subset = random.sample(dtype.names, k=4) @@ -1374,6 +1376,15 @@ def check_pickling(self, dtype): assert_equal(x, y) assert_equal(x[0], y[0]) + @pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") + @pytest.mark.xfail(reason="dtype conversion doesn't allow this yet.") + def test_pickling_large(self): + # The actual itemsize is larger than a c-integer here. + dtype = np.dtype(f"({2**31},)i") + self.check_pickling(dtype) + dtype = np.dtype(f"({2**31},)i", metadata={"a": "b"}) + self.check_pickling(dtype) + @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object, bool]) def test_builtin(self, t): @@ -1438,7 +1449,7 @@ def test_pickle_dtype(self, dt): for proto in range(pickle.HIGHEST_PROTOCOL + 1): roundtrip_dt = pickle.loads(pickle.dumps(dt, proto)) assert roundtrip_dt == dt - assert hash(dt) == pre_pickle_hash + assert hash(roundtrip_dt) == pre_pickle_hash class TestPromotion: @@ -1580,19 +1591,19 @@ class dt: assert np.dtype(dt) == np.float64 assert np.dtype(dt()) == np.float64 - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") - def test_recursion(self): + def test_recursive(self): + # This used to recurse. It now doesn't, we enforce the + # dtype attribute to be a dtype (and will not recurse). class dt: pass dt.dtype = dt - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(dt) dt_instance = dt() dt_instance.dtype = dt - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(dt_instance) def test_void_subtype(self): @@ -1605,21 +1616,58 @@ class dt(np.void): np.dtype(dt) np.dtype(dt(1)) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") - def test_void_subtype_recursion(self): + def test_void_subtype_recursive(self): + # Used to recurse, but dtype is now enforced to be a dtype instance + # so that we do not recurse. class vdt(np.void): pass vdt.dtype = vdt - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(vdt) - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(vdt(1)) +class TestFromDTypeProtocol: + def test_simple(self): + class A: + dtype = np.dtype("f8") + + assert np.dtype(A()) == np.dtype(np.float64) + + def test_not_a_dtype(self): + # This also prevents coercion as a trivial path, although + # a custom error may be nicer. 
+ class ArrayLike: + __numpy_dtype__ = None + dtype = np.dtype("f8") + + with pytest.raises(ValueError, match=".*__numpy_dtype__.*"): + np.dtype(ArrayLike()) + + def test_prevent_dtype_explicit(self): + class ArrayLike: + @property + def __numpy_dtype__(self): + raise RuntimeError("my error!") + + with pytest.raises(RuntimeError, match="my error!"): + np.dtype(ArrayLike()) + + def test_type_object(self): + class TypeWithProperty: + @property + def __numpy_dtype__(self): + raise RuntimeError("not reached") + + # Arbitrary types go to object currently, and the + # protocol doesn't prevent that. + assert np.dtype(TypeWithProperty) == object + + class TestDTypeClasses: @pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational]) def test_basic_dtypes_subclass_properties(self, dtype): @@ -1687,9 +1735,8 @@ def test_integer_alias_names(self, int_, size): @pytest.mark.parametrize("name", ["Half", "Float", "Double", "CFloat", "CDouble"]) - def test_float_alias_names(self, name): - with pytest.raises(AttributeError): - getattr(numpy.dtypes, name + "DType") is numpy.dtypes.Float16DType + def test_float_alias_names_not_present(self, name): + assert not hasattr(numpy.dtypes, f"{name}DType") def test_scalar_helper_all_dtypes(self): for dtype in np.dtypes.__all__: @@ -1788,7 +1835,12 @@ class Union(ctypes.Union): ] expected = np.dtype({ "names": ['a', 'b', 'c', 'd'], - "formats": ['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], + "formats": [ + 'u1', + np.uint16, + np.uint32, + [('one', 'u1'), ('two', np.uint32)], + ], "offsets": [0, 0, 0, 0], "itemsize": ctypes.sizeof(Union) }) @@ -1812,7 +1864,12 @@ class Union(ctypes.Union): ] expected = np.dtype({ "names": ['a', 'b', 'c', 'd'], - "formats": ['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], + "formats": [ + 'u1', + np.uint16, + np.uint32, + [('one', 'u1'), ('two', np.uint32)], + ], "offsets": [0, 0, 0, 0], "itemsize": ctypes.sizeof(Union) }) @@ -1844,7 +1901,15 @@ class PackedStructure(ctypes.Structure): ('g', ctypes.c_uint8) ] expected = np.dtype({ - "formats": [np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8], + "formats": [ + np.uint8, + np.uint16, + np.uint8, + np.uint16, + np.uint32, + np.uint32, + np.uint8, + ], "offsets": [0, 2, 4, 6, 8, 12, 16], "names": ['a', 'b', 'c', 'd', 'e', 'f', 'g'], "itemsize": 18}) @@ -1917,6 +1982,9 @@ def test_pairs(self, pair): class TestUserDType: @pytest.mark.leaks_references(reason="dynamically creates custom dtype.") + @pytest.mark.thread_unsafe( + reason="crashes when GIL disabled, dtype setup is thread-unsafe", + ) def test_custom_structured_dtype(self): class mytype: pass @@ -1937,6 +2005,9 @@ class mytype: del a assert sys.getrefcount(o) == startcount + @pytest.mark.thread_unsafe( + reason="crashes when GIL disabled, dtype setup is thread-unsafe", + ) def test_custom_structured_dtype_errors(self): class mytype: pass @@ -1993,3 +2064,65 @@ def test_creating_dtype_with_dtype_class_errors(): # Regression test for #25031, calling `np.dtype` with itself segfaulted. 
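# What TestFromDTypeProtocol above exercises, restated as a tiny standalone
# check. np.dtype() consulting a `dtype` attribute is long-standing behavior;
# the `__numpy_dtype__` guard and the ValueError for non-dtype attributes are
# what this patch introduces.
import numpy as np

class HasDType:
    dtype = np.dtype("f8")    # must already be a dtype instance; no recursion

assert np.dtype(HasDType()) == np.float64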
with pytest.raises(TypeError, match="Cannot convert np.dtype into a"): np.array(np.ones(10), dtype=np.dtype) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestDTypeSignatures: + def test_signature_dtype(self): + sig = inspect.signature(np.dtype) + + assert len(sig.parameters) == 4 + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["dtype"].default is inspect.Parameter.empty + + assert "align" in sig.parameters + assert sig.parameters["align"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["align"].default is False + + assert "copy" in sig.parameters + assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["copy"].default is False + + # the optional `metadata` parameter has no default, so `**kwargs` must be used + assert "kwargs" in sig.parameters + assert sig.parameters["kwargs"].kind is inspect.Parameter.VAR_KEYWORD + assert sig.parameters["kwargs"].default is inspect.Parameter.empty + + def test_signature_dtype_newbyteorder(self): + sig = inspect.signature(np.dtype.newbyteorder) + + assert len(sig.parameters) == 2 + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["self"].default is inspect.Parameter.empty + + assert "new_order" in sig.parameters + assert sig.parameters["new_order"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["new_order"].default == "S" + + @pytest.mark.parametrize("typename", np.dtypes.__all__) + def test_signature_dtypes_classes(self, typename: str): + dtype_type = getattr(np.dtypes, typename) + sig = inspect.signature(dtype_type) + + match typename.lower().removesuffix("dtype"): + case "bytes" | "str": + params_expect = {"size"} + case "void": + params_expect = {"length"} + case "datetime64" | "timedelta64": + params_expect = {"unit"} + case "string": + # `na_object` cannot be used in the text signature because of its + # `np._NoValue` default, which isn't supported by `inspect.signature`, + # so `**kwargs` is used instead. 
+ params_expect = {"coerce", "kwargs"} + case _: + params_expect = set() + + params_actual = set(sig.parameters) + assert params_actual == params_expect diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 0bd180b5e41f..375ef03c1dd7 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1,4 +1,5 @@ import itertools +import warnings import pytest @@ -11,7 +12,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) # Setup for optimize einsum @@ -79,6 +79,11 @@ def test_einsum_errors(self, do_opt, einsum_fn): b = np.ones((3, 4, 5)) einsum_fn('aabcb,abc', a, b) + with pytest.raises(ValueError): + a = np.arange(3) + # einsum_path does not yet accept kwarg 'casting' + np.einsum('ij->j', [a, a], casting='same_value') + def test_einsum_sorting_behavior(self): # Case 1: 26 dimensions (all lowercase indices) n1 = 26 @@ -231,21 +236,20 @@ def __rmul__(self, other): def test_einsum_views(self): # pass-through for do_opt in [True, False]: - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum(a, [Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum("ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) b = np.einsum(a, [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) # output is writeable whenever input is writeable @@ -256,115 +260,110 @@ def test_einsum_views(self): assert_(not b.flags['WRITEABLE']) # transpose - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("ji", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) b = np.einsum(a, [1, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) # diagonal - a = np.arange(9) - a.shape = (3, 3) + a = np.arange(9).reshape((3, 3)) b = np.einsum("ii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) b = np.einsum(a, [0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with various ways of broadcasting an additional dimension - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("...ii->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a, optimize=do_opt) - 
assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum("i...i->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) # triple diagonal - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("iii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes - a = np.arange(24) - a.shape = (2, 3, 4) + a = np.arange(24).reshape((2, 3, 4)) b = np.einsum("ijk->jik", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.swapaxes(0, 1)) b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.swapaxes(0, 1)) def check_einsum_sums(self, dtype, do_opt=False): @@ -455,8 +454,8 @@ def check_einsum_sums(self, dtype, do_opt=False): np.outer(a, b)) # Suppress the complex warnings for the 'as f8' tests - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) # matvec(a,b) / a.dot(b) where a is matrix, b is vector for n in range(1, 17): @@ -1120,6 +1119,41 @@ def test_output_order(self): tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt) assert_(tmp.flags.c_contiguous) + def test_singleton_broadcasting(self): + eq = "ijp,ipq,ikq->ijk" + shapes = ((3, 1, 1), (3, 1, 3), (1, 3, 3)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "jhcabhijaci,dfijejgh->fgje" + shapes = ( + (1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1), + (3, 1, 3, 1, 1, 1, 1, 2), + ) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "baegffahgc,hdggeff->dhg" + shapes = ((2, 1, 4, 1, 1, 1, 1, 2, 1, 1), (1, 1, 1, 1, 4, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cehgbaifff,fhhdegih->cdghbi" + shapes = ((1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 1, 1, 2, 4, 1, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "gah,cdbcghefg->ef" + 
shapes = ((2, 3, 1), (1, 3, 1, 1, 1, 2, 1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cacc,bcb->" + shapes = ((1, 1, 1, 1), (1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + class TestEinsumPath: def build_operands(self, string, size_dict=global_size_dict): diff --git a/numpy/_core/tests/test_errstate.py b/numpy/_core/tests/test_errstate.py index b72fb65a3239..f0735a045a4d 100644 --- a/numpy/_core/tests/test_errstate.py +++ b/numpy/_core/tests/test_errstate.py @@ -87,7 +87,7 @@ def test_errstate_enter_once(self): @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") def test_asyncio_safe(self): - # asyncio may not always work, lets assume its fine if missing + # asyncio may not always work, let's assume it's fine if missing # Pyodide/wasm doesn't support it. If this test makes problems, # it should just be skipped liberally (or run differently). asyncio = pytest.importorskip("asyncio") diff --git a/numpy/_core/tests/test_extint128.py b/numpy/_core/tests/test_extint128.py index 1a05151ac6be..6e4d74b81d39 100644 --- a/numpy/_core/tests/test_extint128.py +++ b/numpy/_core/tests/test_extint128.py @@ -2,10 +2,10 @@ import itertools import operator -import numpy._core._multiarray_tests as mt import pytest import numpy as np +import numpy._core._multiarray_tests as mt from numpy.testing import assert_equal, assert_raises INT64_MAX = np.iinfo(np.int64).max diff --git a/numpy/_core/tests/test_finfo.py b/numpy/_core/tests/test_finfo.py new file mode 100644 index 000000000000..5703b8d6a765 --- /dev/null +++ b/numpy/_core/tests/test_finfo.py @@ -0,0 +1,86 @@ +import pytest + +import numpy as np +from numpy import exp2, log10 +from numpy._core import numerictypes as ntypes + + +class MachArLike: + """Minimal class to simulate machine arithmetic parameters.""" + def __init__(self, dtype, machep, negep, minexp, maxexp, nmant, iexp): + self.dtype = dtype + self.machep = machep + self.negep = negep + self.minexp = minexp + self.maxexp = maxexp + self.nmant = nmant + self.iexp = iexp + self.eps = exp2(dtype(-nmant)) + self.epsneg = exp2(dtype(negep)) + self.precision = int(-log10(self.eps)) + self.resolution = dtype(10) ** (-self.precision) + + +@pytest.fixture +def float16_ma(): + """Machine arithmetic parameters for float16.""" + f16 = ntypes.float16 + return MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + nmant=10, + iexp=5) + + +@pytest.fixture +def float32_ma(): + """Machine arithmetic parameters for float32.""" + f32 = ntypes.float32 + return MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + nmant=23, + iexp=8) + + +@pytest.fixture +def float64_ma(): + """Machine arithmetic parameters for float64.""" + f64 = ntypes.float64 + return MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + nmant=52, + iexp=11) + + +@pytest.mark.parametrize("dtype,ma_fixture", [ + (np.half, "float16_ma"), + (np.float32, "float32_ma"), + (np.float64, "float64_ma"), +]) +@pytest.mark.parametrize("prop", [ + 'machep', 'negep', 'minexp', 'maxexp', 'nmant', 'iexp', + 'eps', 'epsneg', 'precision', 'resolution' +]) +@pytest.mark.thread_unsafe( + reason="complex fixture setup is thread-unsafe (pytest-dev/pytest#13768)" +) +def test_finfo_properties(dtype, ma_fixture, prop, request): + """Test that finfo properties match expected machine arithmetic values.""" + ma = request.getfixturevalue(ma_fixture) + finfo = 
np.finfo(dtype) + + actual = getattr(finfo, prop) + expected = getattr(ma, prop) + + assert actual == expected, ( + f"finfo({dtype}) property '{prop}' mismatch: " + f"expected {expected}, got {actual}" + ) diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index c925cf1f77e5..c6e10397b3ff 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -32,7 +32,8 @@ def _is_armhf(): # Check if the current platform is ARMHF (32-bit ARM architecture) - return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' + architecture = platform.architecture() + return platform.machine().startswith('arm') and architecture[0] == '32bit' class PhysicalQuantity(float): def __new__(cls, value): @@ -491,7 +492,7 @@ def test_add_doc(self): # test that np.add_newdoc did attach a docstring successfully: tgt = "Current flat index into the array." assert_equal(np._core.flatiter.index.__doc__[:len(tgt)], tgt) - assert_(len(np._core.ufunc.identity.__doc__) > 300) + assert_(len(np._core.ufunc.identity.__doc__) > 250) assert_(len(np.lib._index_tricks_impl.mgrid.__doc__) > 300) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") diff --git a/numpy/_core/tests/test_getlimits.py b/numpy/_core/tests/test_getlimits.py index 721c6ac6cdf9..4e911b89e89f 100644 --- a/numpy/_core/tests/test_getlimits.py +++ b/numpy/_core/tests/test_getlimits.py @@ -9,7 +9,6 @@ import numpy as np from numpy import double, half, longdouble, single from numpy._core import finfo, iinfo -from numpy._core.getlimits import _discovered_machar, _float_ma from numpy.testing import assert_, assert_equal, assert_raises ################################################## @@ -139,53 +138,20 @@ def test_instances(): finfo(np.int64(1)) -def assert_ma_equal(discovered, ma_like): - # Check MachAr-like objects same as calculated MachAr instances - for key, value in discovered.__dict__.items(): - assert_equal(value, getattr(ma_like, key)) - if hasattr(value, 'shape'): - assert_equal(value.shape, getattr(ma_like, key).shape) - assert_equal(value.dtype, getattr(ma_like, key).dtype) - - -def test_known_types(): - # Test we are correctly compiling parameters for known types - for ftype, ma_like in ((np.float16, _float_ma[16]), - (np.float32, _float_ma[32]), - (np.float64, _float_ma[64])): - assert_ma_equal(_discovered_machar(ftype), ma_like) - # Suppress warning for broken discovery of double double on PPC - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize - if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): - # 80-bit extended precision - assert_ma_equal(ld_ma, _float_ma[80]) - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - assert_ma_equal(ld_ma, _float_ma[128]) - - def test_subnormal_warning(): """Test that the subnormal is zero warning is not being raised.""" - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): - # 80-bit extended precision - ld_ma.smallest_subnormal - assert len(w) == 0 - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - ld_ma.smallest_subnormal - assert len(w) == 0 - else: - # Double double - ld_ma.smallest_subnormal - # This test may fail on some 
platforms - assert len(w) == 0 + # Test for common float types + for dtype in [np.float16, np.float32, np.float64]: + f = finfo(dtype) + _ = f.smallest_subnormal + # Also test longdouble + with np.errstate(all='ignore'): + fld = finfo(np.longdouble) + _ = fld.smallest_subnormal + # Check no warnings were raised + assert len(w) == 0 def test_plausible_finfo(): diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index 68f17b2a5e14..3ced5b466a44 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -18,56 +18,63 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs): f"Did not raise floating point {strmatch} error") class TestHalf: - def setup_method(self): + def _create_arrays_all(self): # An array of all possible float16 values - self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 + all_f16 = np.arange(0x10000, dtype=uint16) + all_f16 = all_f16.view(float16) # NaN value can cause an invalid FP exception if HW is being used with np.errstate(invalid='ignore'): - self.all_f32 = np.array(self.all_f16, dtype=float32) - self.all_f64 = np.array(self.all_f16, dtype=float64) + all_f32 = np.array(all_f16, dtype=float32) + all_f64 = np.array(all_f16, dtype=float64) + return all_f16, all_f32, all_f64 + def _create_arrays_nonan(self): # An array of all non-NaN float16 values, in sorted order - self.nonan_f16 = np.concatenate( + nonan_f16 = np.concatenate( (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), np.arange(0x0000, 0x7c01, 1, dtype=uint16))) - self.nonan_f16.dtype = float16 - self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) - self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) - - # An array of all finite float16 values, in sorted order - self.finite_f16 = self.nonan_f16[1:-1] - self.finite_f32 = self.nonan_f32[1:-1] - self.finite_f64 = self.nonan_f64[1:-1] + nonan_f16 = nonan_f16.view(float16) + nonan_f32 = np.array(nonan_f16, dtype=float32) + nonan_f64 = np.array(nonan_f16, dtype=float64) + return nonan_f16, nonan_f32, nonan_f64 + + def _create_arrays_finite(self): + nonan_f16, nonan_f32, nonan_f64 = self._create_arrays_nonan() + finite_f16 = nonan_f16[1:-1] + finite_f32 = nonan_f32[1:-1] + finite_f64 = nonan_f64[1:-1] + return finite_f16, finite_f32, finite_f64 def test_half_conversions(self): """Checks that all 16-bit values survive conversion to/from 32-bit and 64-bit float""" # Because the underlying routines preserve the NaN bits, every # value is preserved when converting to/from other floats. + all_f16, all_f32, all_f64 = self._create_arrays_all() + nonan_f16, _, _ = self._create_arrays_nonan() # Convert from float32 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f32, dtype=float16) + b = np.array(all_f32, dtype=float16) # avoid testing NaNs due to differing bit patterns in Q/S NaNs b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert from float64 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f64, dtype=float16) + b = np.array(all_f64, dtype=float16) b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert float16 to longdouble and back # This doesn't necessarily preserve the extra NaN bits, # so exclude NaNs. 
- a_ld = np.array(self.nonan_f16, dtype=np.longdouble) + a_ld = np.array(nonan_f16, dtype=np.longdouble) b = np.array(a_ld, dtype=float16) - assert_equal(self.nonan_f16.view(dtype=uint16), + assert_equal(nonan_f16.view(dtype=uint16), b.view(dtype=uint16)) # Check the range for which all integers can be represented @@ -86,6 +93,21 @@ def test_half_conversion_to_string(self, string_dt): arr = np.ones(3, dtype=np.float16).astype(string_dt) assert arr.dtype == expected_dt + @pytest.mark.parametrize("dtype", ["S", "U", object]) + def test_to_half_cast_error(self, dtype): + arr = np.array(["3M"], dtype=dtype) + with pytest.raises(ValueError): + arr.astype(np.float16) + + arr = np.array(["23490349034"], dtype=dtype) + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning): + arr.astype(np.float16) + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + arr.astype(np.float16) + @pytest.mark.parametrize("string_dt", ["S", "U"]) def test_half_conversion_from_string(self, string_dt): string = np.array("3.1416", dtype=string_dt) @@ -171,34 +193,35 @@ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): assert larger_value.astype(np.float16) == smallest_value def test_nans_infs(self): + all_f16, all_f32, _ = self._create_arrays_all() with np.errstate(all='ignore'): # Check some of the ufuncs - assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) - assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) - assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) - assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.isnan(all_f16), np.isnan(all_f32)) + assert_equal(np.isinf(all_f16), np.isinf(all_f32)) + assert_equal(np.isfinite(all_f16), np.isfinite(all_f32)) + assert_equal(np.signbit(all_f16), np.signbit(all_f32)) assert_equal(np.spacing(float16(65504)), np.inf) # Check comparisons of all values with NaN nan = float16(np.nan) - assert_(not (self.all_f16 == nan).any()) - assert_(not (nan == self.all_f16).any()) + assert_(not (all_f16 == nan).any()) + assert_(not (nan == all_f16).any()) - assert_((self.all_f16 != nan).all()) - assert_((nan != self.all_f16).all()) + assert_((all_f16 != nan).all()) + assert_((nan != all_f16).all()) - assert_(not (self.all_f16 < nan).any()) - assert_(not (nan < self.all_f16).any()) + assert_(not (all_f16 < nan).any()) + assert_(not (nan < all_f16).any()) - assert_(not (self.all_f16 <= nan).any()) - assert_(not (nan <= self.all_f16).any()) + assert_(not (all_f16 <= nan).any()) + assert_(not (nan <= all_f16).any()) - assert_(not (self.all_f16 > nan).any()) - assert_(not (nan > self.all_f16).any()) + assert_(not (all_f16 > nan).any()) + assert_(not (nan > all_f16).any()) - assert_(not (self.all_f16 >= nan).any()) - assert_(not (nan >= self.all_f16).any()) + assert_(not (all_f16 >= nan).any()) + assert_(not (nan >= all_f16).any()) def test_half_values(self): """Confirms a small number of known half values""" @@ -218,7 +241,7 @@ def test_half_values(self): 0x0001, 0x8001, 0x0000, 0x8000, 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 + b = b.view(dtype=float16) assert_equal(a, b) def test_half_rounding(self): @@ -255,9 +278,10 @@ def test_half_rounding(self): def test_half_correctness(self): """Take every finite float16, and check the casting functions with a manual conversion.""" + finite_f16, finite_f32, finite_f64 = self._create_arrays_finite() # Create an array of all finite float16s - a_bits = self.finite_f16.view(dtype=uint16) + a_bits = 
finite_f16.view(dtype=uint16) # Convert to 64-bit float manually a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) @@ -270,29 +294,30 @@ def test_half_correctness(self): a_manual = a_sgn * a_man * 2.0**a_exp - a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + a32_fail = np.nonzero(finite_f32 != a_manual)[0] if len(a32_fail) != 0: bad_index = a32_fail[0] - assert_equal(self.finite_f32, a_manual, + assert_equal(finite_f32, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f32[bad_index], + finite_f32[bad_index], a_manual[bad_index])) - a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] + a64_fail = np.nonzero(finite_f64 != a_manual)[0] if len(a64_fail) != 0: bad_index = a64_fail[0] - assert_equal(self.finite_f64, a_manual, + assert_equal(finite_f64, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f64[bad_index], + finite_f64[bad_index], a_manual[bad_index])) def test_half_ordering(self): """Make sure comparisons are working right""" + nonan_f16, _, _ = self._create_arrays_nonan() # All non-NaN float16 values in reverse order - a = self.nonan_f16[::-1].copy() + a = nonan_f16[::-1].copy() # 32-bit float copy b = np.array(a, dtype=float32) @@ -531,7 +556,7 @@ def test_half_fpe(self): assert_raises_fpe('overflow', lambda a, b: a - b, float16(-65504), float16(17)) assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) # noqa: E501 assert_raises_fpe('overflow', np.spacing, float16(65504)) # Invalid value errors diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index 74be5219a287..25a7158aaf6f 100644 --- a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -1,6 +1,7 @@ import random import pytest + from numpy._core._multiarray_tests import identityhash_tester diff --git a/numpy/_core/tests/test_indexerrors.py b/numpy/_core/tests/test_indexerrors.py index 02110c28356a..70e97dd6428e 100644 --- a/numpy/_core/tests/test_indexerrors.py +++ b/numpy/_core/tests/test_indexerrors.py @@ -1,8 +1,5 @@ import numpy as np -from numpy.testing import ( - assert_raises, - assert_raises_regex, -) +from numpy.testing import assert_raises, assert_raises_regex class TestIndexErrors: diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index e722d0c1a9df..65d42d6c9370 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1,22 +1,23 @@ import functools +import inspect import operator import sys import warnings from itertools import product import pytest -from numpy._core._multiarray_tests import array_indexing import numpy as np +from numpy._core._multiarray_tests import array_indexing from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, assert_, assert_array_equal, assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -615,22 +616,6 @@ def test_nontuple_ndindex(self): assert_equal(a[[0, 1], [0, 1]], np.array([0, 6])) assert_raises(IndexError, a.__getitem__, [slice(None)]) - def test_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array([0, 5, 6]) - assert_equal(a.flat[b.flat], np.array([0, 5, 6])) - - def test_empty_string_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = 
np.array([], dtype="S") - assert_equal(a.flat[b.flat], np.array([])) - - def test_nonempty_string_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array(["a"], dtype="S") - with pytest.raises(IndexError, match="unsupported iterator index"): - a.flat[b.flat] - class TestFieldIndexing: def test_scalar_return_type(self): @@ -784,15 +769,16 @@ def test_boolean_index_cast_assign(self): assert_equal(zero_array[0, 1], 1) # Fancy indexing works, although we get a cast warning. - assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) assert_equal(zero_array[0, 1], 2) # No complex part # Cast complex to float, throwing away the imaginary portion. - assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) + class TestFancyIndexingEquivalence: def test_object_assign(self): # Check that the field and object special case using copyto is active. @@ -863,10 +849,11 @@ class TestMultiIndexingAutomated: """ - def setup_method(self): - self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) - self.b = np.empty((3, 0, 5, 6)) - self.complex_indices = ['skip', Ellipsis, + def _create_array(self): + return np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + + def _create_complex_indices(self): + return ['skip', Ellipsis, 0, # Boolean indices, up to 3-d for some special cases of eating up # dimensions, also need to test all False @@ -886,11 +873,6 @@ def setup_method(self): np.array([2, -1], dtype=np.int8), np.zeros([1] * 31, dtype=int), # trigger too large array. np.array([0., 1.])] # invalid datatype - # Some simpler indices that still cover a bit more - self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), - 'skip'] - # Very simple ones to fill the rest: - self.fill_indices = [slice(None, None), 0] def _get_multi_index(self, arr, indices): """Mimic multi dimensional indexing. @@ -1223,16 +1205,23 @@ def test_boolean(self): # it is aligned to the left. This is probably correct for # consistency with arr[boolean_array,] also no broadcasting # is done at all + a = self._create_array() self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool),)) + a, (np.zeros_like(a, dtype=bool),)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + a, (np.zeros_like(a, dtype=bool)[..., 0],)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + a, (np.zeros_like(a, dtype=bool)[None, ...],)) def test_multidim(self): # Automatically test combinations with complex indexes on 2nd (or 1st) # spot and the simple ones in one other spot. + a = self._create_array() + b = np.empty((3, 0, 5, 6)) + complex_indices = self._create_complex_indices() + simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip'] + fill_indices = [slice(None, None), 0] + with warnings.catch_warnings(): # This is so that np.array(True) is not accepted in a full integer # index, when running the file separately. 
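The hunks around this point convert shared `setup_method` state into per-test factory helpers (`_create_array`, `_create_complex_indices`), the same refactor applied to `TestFlags`, `TestAttributes`, `TestZeroRank`, and `TestScalarIndexing` later in this patch: each test builds its own data, so nothing leaks between tests when they run concurrently. A minimal sketch of the pattern (class and method names here are illustrative, not taken from the patch):

    import numpy as np

    # Before: one array is shared through `self`; a test that mutates it
    # (e.g. flips `writeable`) can race with or leak state into its peers.
    class TestShared:
        def setup_method(self):
            self.a = np.arange(10)

        def test_writeable(self):
            self.a.flags.writeable = False  # visible to later tests

    # After: each test asks a helper for a fresh, private array.
    class TestIsolated:
        def _create_array(self):
            return np.arange(10)

        def test_writeable(self):
            a = self._create_array()
            a.flags.writeable = False  # local to this test only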
@@ -1243,28 +1232,30 @@ def isskip(idx): return isinstance(idx, str) and idx == "skip" for simple_pos in [0, 2, 3]: - tocheck = [self.fill_indices, self.complex_indices, - self.fill_indices, self.fill_indices] - tocheck[simple_pos] = self.simple_indices + tocheck = [fill_indices, complex_indices, + fill_indices, fill_indices] + tocheck[simple_pos] = simple_indices for index in product(*tocheck): index = tuple(i for i in index if not isskip(i)) - self._check_multi_index(self.a, index) - self._check_multi_index(self.b, index) + self._check_multi_index(a, index) + self._check_multi_index(b, index) # Check very simple item getting: - self._check_multi_index(self.a, (0, 0, 0, 0)) - self._check_multi_index(self.b, (0, 0, 0, 0)) + self._check_multi_index(a, (0, 0, 0, 0)) + self._check_multi_index(b, (0, 0, 0, 0)) # Also check (simple cases of) too many indices: - assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) - assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, [1], 0, 0), 0) def test_1d(self): a = np.arange(10) - for index in self.complex_indices: + complex_indices = self._create_complex_indices() + for index in complex_indices: self._check_single_index(a, index) + class TestFloatNonIntegerArgument: """ These test that ``TypeError`` is raised when you try to use @@ -1352,7 +1343,8 @@ def test_boolean_indexing_fast_path(self): "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together + # This used to incorrectly give a ValueError: operands could not be + # broadcast together idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " @@ -1453,3 +1445,248 @@ def test_setitem(self): a = a.reshape(5, 2) assign(a, 4, 10) assert_array_equal(a[-1], [10, 10]) + + +class TestFlatiterIndexing: + def test_flatiter_indexing_single_integer(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[0], 0) + assert_array_equal(a.flat[4], 4) + assert_array_equal(a.flat[-1], 8) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] + + def test_flatiter_indexing_slice(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[:], np.arange(9)) + assert_array_equal(a.flat[:5], np.arange(5)) + assert_array_equal(a.flat[5:10], np.arange(5, 9)) + assert_array_equal(a.flat[::2], np.arange(0, 9, 2)) + assert_array_equal(a.flat[::-1], np.arange(8, -1, -1)) + assert_array_equal(a.flat[10:5], np.array([])) + + assert_array_equal(a.flat[()], np.arange(9)) + assert_array_equal(a.flat[...], np.arange(9)) + + def test_flatiter_indexing_boolean(self): + a = np.arange(9).reshape((3, 3)) + + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[True], 0) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[False], np.array([])) + + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + assert_array_equal(a.flat[mask], np.arange(0, 9, 2)) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + 
with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] + + def test_flatiter_indexing_fancy(self): + a = np.arange(9).reshape((3, 3)) + + indices = np.array([1, 3, 5]) + assert_array_equal(a.flat[indices], indices) + + assert_array_equal(a.flat[[-1, -2]], np.array([8, 7])) + + indices_2d = np.array([[1, 2], [3, 4]]) + assert_array_equal(a.flat[indices_2d], indices_2d) + + assert_array_equal(a.flat[[True, 1]], np.array([1, 1])) + + assert_array_equal(a.flat[[]], np.array([], dtype=a.dtype)) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] + assert_array_equal(a.flat[np.asarray([True, False, True])], np.array([0, 2])) + + def test_flatiter_indexing_not_supported_newaxis_multidimensional_float(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] + + with pytest.raises(IndexError, + match=r"too many indices for flat iterator: flat iterator " + r"is 1-dimensional, but 2 were indexed"): + a.flat[1, 2] + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + assert_array_equal(a.flat[[1.0, 2.0]], np.array([1, 2])) + + def test_flatiter_assign_single_integer(self): + a = np.arange(9).reshape((3, 3)) + + a.flat[0] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a.flat[4] = 20 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 8]])) + + a.flat[-1] = 30 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 30]])) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] = 40 + + def test_flatiter_indexing_slice_assign(self): + a = np.arange(9).reshape((3, 3)) + a.flat[:] = 10 + assert_array_equal(a, np.full((3, 3), 10)) + + a = np.arange(9).reshape((3, 3)) + a.flat[:5] = 20 + assert_array_equal(a, np.array([[20, 20, 20], [20, 20, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[5:10] = 30 + assert_array_equal(a, np.array([[0, 1, 2], [3, 4, 30], [30, 30, 30]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::2] = 40 + assert_array_equal(a, np.array([[40, 1, 40], [3, 40, 5], [40, 7, 40]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::-1] = 50 + assert_array_equal(a, np.full((3, 3), 50)) + + a = np.arange(9).reshape((3, 3)) + a.flat[10:5] = 60 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match="Assigning to a flat iterator with a 0-D index"): + a.flat[()] = 70 + + a = np.arange(9).reshape((3, 3)) + a.flat[...] 
= 80 + assert_array_equal(a, np.full((3, 3), 80)) + + def test_flatiter_indexing_boolean_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[True] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[False] = 20 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + a.flat[mask] = 30 + assert_array_equal(a, np.array([[30, 1, 30], [3, 30, 5], [30, 7, 30]])) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] = 40 + + def test_flatiter_indexing_fancy_assign(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5]) + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + a.flat[[-1, -2]] = 20 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 20, 20]])) + + a = np.arange(9).reshape((3, 3)) + indices_2d = np.array([[1, 2], [3, 4]]) + a.flat[indices_2d] = 30 + assert_array_equal(a, np.array([[0, 30, 30], [30, 30, 5], [6, 7, 8]])) + + a.flat[[True, 1]] = 40 + assert_array_equal(a, np.array([[0, 40, 30], [30, 30, 5], [6, 7, 8]])) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] = 50 + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] = 20 + a.flat[np.asarray([True, False, True])] = 20 + assert_array_equal(a, np.array([20, 1, 20])) + + def test_flatiter_indexing_fancy_int16_dtype(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5], dtype=np.int16) + assert_array_equal(a.flat[indices], np.array([1, 3, 5])) + + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + def test_flatiter_indexing_not_supported_newaxis_multid_float_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] = 10 + + a.flat[[1, 2]] = 10 + assert_array_equal(a, np.array([[0, 10, 10], [3, 4, 5], [6, 7, 8]])) + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + a.flat[[1.0, 2.0]] = 20 + assert_array_equal(a, np.array([[0, 20, 20], [3, 4, 5], [6, 7, 8]])) + + def test_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([0, 5, 6]) + assert_equal(a.flat[b.flat], np.array([0, 5, 6])) + + def test_empty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([], dtype="S") + # This is arguably incorrect, and should be removed (ideally with + # deprecation). But it matches the array path and comes from not + # distinguishing `arr[np.array([]).flat]` and `arr[[]]` and the latter + # must pass. 
+ assert_equal(a.flat[b.flat], np.array([])) + + def test_nonempty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array(["a"], dtype="S") + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), ellipsis \(`\.\.\.`\) " + r"and integer or boolean arrays are valid indices"): + a.flat[b.flat] + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize("methodname", ["__array__", "copy"]) +def test_flatiter_method_signatures(methodname: str): + method = getattr(np.flatiter, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.flatiter.{methodname}: {e}") + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY diff --git a/numpy/_core/tests/test_longdouble.py b/numpy/_core/tests/test_longdouble.py index f7edd9774573..a7aa9145711a 100644 --- a/numpy/_core/tests/test_longdouble.py +++ b/numpy/_core/tests/test_longdouble.py @@ -291,7 +291,8 @@ def test_array_repr(): b = np.array([1], dtype=np.longdouble) if not np.all(a != b): raise ValueError("precision loss creating arrays") - assert_(repr(a) != repr(b)) + with np.printoptions(precision=LD_INFO.precision + 1): + assert_(repr(a) != repr(b)) # # Locale tests: scalar types formatting should be independent of the locale diff --git a/numpy/_core/tests/test_machar.py b/numpy/_core/tests/test_machar.py deleted file mode 100644 index 2d772dd51233..000000000000 --- a/numpy/_core/tests/test_machar.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Test machar. Given recent changes to hardcode type data, we might want to get -rid of both MachAr and this test at some point. - -""" -import numpy._core.numerictypes as ntypes -from numpy import array, errstate -from numpy._core._machar import MachAr - - -class TestMachAr: - def _run_machar_highprec(self): - # Instantiate MachAr instance with high enough precision to cause - # underflow - try: - hiprec = ntypes.float96 - MachAr(lambda v: array(v, hiprec)) - except AttributeError: - # Fixme, this needs to raise a 'skip' exception. - "Skipping test: no ntypes.float96 available on this platform." - - def test_underlow(self): - # Regression test for #759: - # instantiating MachAr for dtype = np.float96 raises spurious warning. - with errstate(all='raise'): - try: - self._run_machar_highprec() - except FloatingPointError as e: - msg = f"Caught {e} exception, should not have been raised." 
- raise AssertionError(msg) diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index d1735670ad6b..240ea62850ee 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -1,10 +1,10 @@ import itertools import pytest -from numpy._core._multiarray_tests import internal_overlap, solve_diophantine import numpy as np from numpy._core import _umath_tests +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine from numpy.lib.stride_tricks import as_strided from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises @@ -165,8 +165,9 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: + base_delta = a.__array_interface__['data'][0] - b.__array_interface__['data'][0] err_msg = " " + "\n ".join([ - f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", + f"base_a - base_b = {base_delta!r}", f"shape_a = {a.shape!r}", f"shape_b = {b.shape!r}", f"strides_a = {a.strides!r}", @@ -402,7 +403,9 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) + sum_ax = sum(a * x for a, x in zip(A, X)) + sum_au_half = sum(a * u // 2 for a, u in zip(A, U)) + assert_(sum_ax == sum_au_half) assert_(all(0 <= x <= u for x, u in zip(X, U))) assert_(any(x != u // 2 for x, u in zip(X, U))) diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index b9f971e73249..720ea1aa91b8 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -9,7 +9,7 @@ import numpy as np from numpy._core.multiarray import get_handler_name -from numpy.testing import IS_EDITABLE, IS_WASM, assert_warns, extbuild +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild @pytest.fixture @@ -343,9 +343,6 @@ async def async_test_context_locality(get_module): def test_context_locality(get_module): - if (sys.implementation.name == 'pypy' - and sys.pypy_version_info[:3] < (7, 3, 6)): - pytest.skip('no context-locality support in PyPy < 7.3.6') asyncio.run(async_test_context_locality(get_module)) @@ -411,10 +408,8 @@ def test_new_policy(get_module): assert np._core.multiarray.get_handler_name(c) == orig_policy_name -@pytest.mark.xfail(sys.implementation.name == "pypy", - reason=("bad interaction between getenv and " - "os.environ inside pytest")) @pytest.mark.parametrize("policy", ["0", "1", None]) +@pytest.mark.thread_unsafe(reason="modifies environment variables") def test_switch_owner(get_module, policy): a = get_module.get_array() assert np._core.multiarray.get_handler_name(a) is None @@ -432,7 +427,7 @@ def test_switch_owner(get_module, policy): # The policy should be NULL, so we have to assume we can call # "free". 
A warning is given if the policy == "1" if policy: - with assert_warns(RuntimeWarning) as w: + with pytest.warns(RuntimeWarning) as w: del a gc.collect() else: diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index cbd825205844..8e2aa0a507b1 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -1,6 +1,7 @@ import mmap import os import sys +import warnings from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryFile @@ -26,10 +27,10 @@ assert_array_equal, assert_equal, break_cycles, - suppress_warnings, ) +@pytest.mark.thread_unsafe(reason="setup & memmap is thread-unsafe (gh-29126)") class TestMemmap: def setup_method(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') @@ -167,8 +168,9 @@ def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data - with suppress_warnings() as sup: - sup.filter(FutureWarning, "np.average currently does not preserve") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "np.average currently does not preserve", FutureWarning) for unary_op in [sum, average, prod]: result = unary_op(fp) assert_(isscalar(result)) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 7603449ba28e..62f3bd4a77c4 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3,6 +3,8 @@ import ctypes import functools import gc +import importlib +import inspect import io import itertools import mmap @@ -21,20 +23,21 @@ from datetime import datetime, timedelta from decimal import Decimal -import numpy._core._multiarray_tests as _multiarray_tests import pytest -from numpy._core._rational_tests import rational import numpy as np +import numpy._core._multiarray_tests as _multiarray_tests +from numpy._core._rational_tests import rational from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib import stride_tricks from numpy.lib.recfunctions import repack_fields from numpy.testing import ( + BLAS_SUPPORTS_FPE, HAS_REFCOUNT, IS_64BIT, IS_PYPY, - IS_PYSTON, IS_WASM, assert_, assert_allclose, @@ -46,14 +49,16 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, break_cycles, check_support_sve, runstring, - suppress_warnings, temppath, ) -from numpy.testing._private.utils import _no_tracing, requires_memory +from numpy.testing._private.utils import ( + _no_tracing, + requires_deep_recursion, + requires_memory, +) def assert_arg_sorted(arr, arg): @@ -105,16 +110,14 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None): class TestFlags: - def setup_method(self): - self.a = np.arange(10) - def test_writeable(self): + arr = np.arange(10) mydict = locals() - self.a.flags.writeable = False - assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - self.a[0] = 0 + arr.flags.writeable = False + assert_raises(ValueError, runstring, 'arr[0] = 3', mydict) + arr.flags.writeable = True + arr[0] = 5 + arr[0] = 0 def test_writeable_any_base(self): # Ensure that any base being writeable is sufficient to change flag; @@ -252,18 +255,19 @@ class MyArr: assert np.asarray(MyArr()).flags.writeable is writeable def test_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags['C'], True) - assert_equal(self.a.flags.farray, False) 
- assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.writebackifcopy, False) - assert_equal(self.a.flags['X'], False) - assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + arr = np.arange(10) + assert_equal(arr.flags.carray, True) + assert_equal(arr.flags['C'], True) + assert_equal(arr.flags.farray, False) + assert_equal(arr.flags.behaved, True) + assert_equal(arr.flags.fnc, False) + assert_equal(arr.flags.forc, True) + assert_equal(arr.flags.owndata, True) + assert_equal(arr.flags.writeable, True) + assert_equal(arr.flags.aligned, True) + assert_equal(arr.flags.writebackifcopy, False) + assert_equal(arr.flags['X'], False) + assert_equal(arr.flags['WRITEBACKIFCOPY'], False) def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) @@ -311,41 +315,44 @@ def test_int(self): class TestAttributes: - def setup_method(self): - self.one = np.arange(10) - self.two = np.arange(20).reshape(4, 5) - self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + def _create_arrays(self): + one = np.arange(10) + two = np.arange(20).reshape(4, 5) + three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + return one, two, three def test_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4, 5)) - assert_equal(self.three.shape, (2, 5, 6)) - self.three.shape = (10, 3, 2) - assert_equal(self.three.shape, (10, 3, 2)) - self.three.shape = (2, 5, 6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5 * num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30 * num, 6 * num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20 * num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, np.arange(20)) + one, two, three = self._create_arrays() + assert_equal(one.shape, (10,)) + assert_equal(two.shape, (4, 5)) + assert_equal(three.shape, (2, 5, 6)) + three.shape = (10, 3, 2) + assert_equal(three.shape, (10, 3, 2)) + three.shape = (2, 5, 6) + assert_equal(one.strides, (one.itemsize,)) + num = two.itemsize + assert_equal(two.strides, (5 * num, num)) + num = three.itemsize + assert_equal(three.strides, (30 * num, 6 * num, num)) + assert_equal(one.ndim, 1) + assert_equal(two.ndim, 2) + assert_equal(three.ndim, 3) + num = two.itemsize + assert_equal(two.size, 20) + assert_equal(two.nbytes, 20 * num) + assert_equal(two.itemsize, two.dtype.itemsize) + assert_equal(two.base, np.arange(20)) def test_dtypeattr(self): - assert_equal(self.one.dtype, np.dtype(np.int_)) - assert_equal(self.three.dtype, np.dtype(np.float64)) - assert_equal(self.one.dtype.char, np.dtype(int).char) - assert self.one.dtype.char in "lq" - assert_equal(self.three.dtype.char, 'd') - assert_(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') + one, _, three = self._create_arrays() + assert_equal(one.dtype, np.dtype(np.int_)) + assert_equal(three.dtype, np.dtype(np.float64)) + assert_equal(one.dtype.char, np.dtype(int).char) + assert one.dtype.char in "lq" + assert_equal(three.dtype.char, 'd') + assert_(three.dtype.str[0] in '<>') + 
assert_equal(one.dtype.str[1], 'i') + assert_equal(three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 @@ -356,7 +363,7 @@ def test_int_subclassing(self): assert_(not isinstance(numpy_int, int)) def test_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, @@ -373,7 +380,7 @@ def make_array(size, offset, strides): make_array(0, 0, 10) def test_set_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): try: @@ -381,7 +388,8 @@ def make_array(size, offset, strides): offset=offset * x.itemsize) except Exception as e: raise RuntimeError(e) - r.strides = strides = strides * x.itemsize + with pytest.warns(DeprecationWarning): + r.strides = strides * x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) @@ -391,24 +399,28 @@ def make_array(size, offset, strides): assert_raises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. - x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + x = stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): - arr.strides = strides + with pytest.warns(DeprecationWarning): + arr.strides = strides assert_raises(ValueError, set_strides, x, (10 * x.itemsize, x.itemsize)) # Test for offset calculations: - x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + x = stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) assert_raises(ValueError, set_strides, x[::-1], -1) a = x[::-1] - a.strides = 1 - a[::2].strides = 2 + with pytest.warns(DeprecationWarning): + a.strides = 1 + with pytest.warns(DeprecationWarning): + a[::2].strides = 2 # test 0d arr_0d = np.array(0) - arr_0d.strides = () + with pytest.warns(DeprecationWarning): + arr_0d.strides = () assert_raises(TypeError, set_strides, arr_0d, None) def test_fill(self): @@ -582,6 +594,32 @@ def test_array_as_keyword(self, func): else: func(a=3) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def test_array_signature(self, func): + sig = inspect.signature(func) + + assert len(sig.parameters) >= 3 + + arg0 = "object" if func is np.array else "a" + assert arg0 in sig.parameters + assert sig.parameters[arg0].default is inspect.Parameter.empty + assert sig.parameters[arg0].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].default is None + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "like" in sig.parameters + assert sig.parameters["like"].default is None + assert sig.parameters["like"].kind is inspect.Parameter.KEYWORD_ONLY + class TestAssignment: def test_assignment_broadcasting(self): @@ -727,46 +765,46 @@ def test_structured_non_void(self): class TestZeroRank: - def setup_method(self): - self.d = np.array(0), np.array('x', object) + def _create_arrays(self): + return np.array(0), np.array('x', object) def test_ellipsis_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[...], 0) assert_equal(b[...], 'x') assert_(a[...].base is a) # `a[...] 
is a` in numpy <1.9. assert_(b[...].base is b) # `b[...] is b` in numpy <1.9. def test_empty_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[()], 0) assert_equal(b[()], 'x') assert_(type(a[()]) is a.dtype.type) assert_(type(b[()]) is str) def test_invalid_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[0], b) assert_raises(IndexError, lambda x: x[np.array([], int)], a) assert_raises(IndexError, lambda x: x[np.array([], int)], b) def test_ellipsis_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[...] = 42 assert_equal(a, 42) b[...] = '' assert_equal(b.item(), '') def test_empty_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[()] = 42 assert_equal(a, 42) b[()] = '' assert_equal(b.item(), '') def test_invalid_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() def assign(x, i, v): x[i] = v @@ -776,7 +814,7 @@ def assign(x, i, v): assert_raises(ValueError, assign, a, (), '') def test_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -787,7 +825,7 @@ def test_newaxis(self): assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() def subscript(x, i): x[i] @@ -831,26 +869,26 @@ def test_real_imag(self): class TestScalarIndexing: - def setup_method(self): - self.d = np.array([0, 1])[0] + def _create_array(self): + return np.array([0, 1])[0] def test_ellipsis_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[...], 0) assert_equal(a[...].shape, ()) def test_empty_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[()], 0) assert_equal(a[()].shape, ()) def test_invalid_subscript(self): - a = self.d + a = self._create_array() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[np.array([], int)], a) def test_invalid_subscript_assignment(self): - a = self.d + a = self._create_array() def assign(x, i, v): x[i] = v @@ -858,7 +896,7 @@ def assign(x, i, v): assert_raises(TypeError, assign, a, 0, 42) def test_newaxis(self): - a = self.d + a = self._create_array() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -869,7 +907,7 @@ def test_newaxis(self): assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a = self.d + a = self._create_array() def subscript(x, i): x[i] @@ -995,6 +1033,7 @@ def test_too_big_error(self): @pytest.mark.skipif(not IS_64BIT, reason="malloc may not fail on 32 bit systems") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") def test_malloc_fails(self): # This test is guaranteed to fail due to a too large allocation with assert_raises(np._core._exceptions._ArrayMemoryError): @@ -1274,6 +1313,85 @@ def test_creation_from_dtypemeta(self, func): assert_array_equal(arr1, arr2) assert arr2.dtype == dtype + def test_ndmax_less_than_actual_dims_dtype_object(self): + data = [[1, 2, 3], [4, 5, 6]] + arr = np.array(data, ndmax=1, dtype=object) + assert arr.ndim == 1 + assert arr.shape == (2,) + assert arr.dtype == object + + data = [[1, 2, 3], [4, 5]] + arr = np.array(data, ndmax=1, dtype=object) + assert arr.ndim == 1 + assert arr.shape == (2,) + assert arr.dtype == 
object
+
+        data = [[[1], [2]], [[3], [4]]]
+        arr = np.array(data, ndmax=2, dtype=object)
+        assert arr.ndim == 2
+        assert arr.shape == (2, 2)
+        assert arr.dtype == object
+
+    def test_ndmax_equal_to_actual_dims(self):
+        data = [[1, 2], [3, 4]]
+        arr = np.array(data, ndmax=2)
+        assert arr.ndim == 2
+        assert_array_equal(arr, np.array(data))
+
+    def test_ndmax_greater_than_actual_dims(self):
+        data = [[1, 2], [3, 4]]
+        arr = np.array(data, ndmax=3)
+        assert arr.ndim == 2
+        assert_array_equal(arr, np.array(data))
+
+    def test_ndmax_less_than_actual_dims(self):
+        data = [[[1], [2]], [[3], [4]]]
+        with pytest.raises(ValueError,
+                match="setting an array element with a sequence. "
+                      "The requested array would exceed the maximum number of dimension of 2."):
+            np.array(data, ndmax=2)
+
+    def test_ndmax_is_zero(self):
+        data = [1, 2, 3]
+        arr = np.array(data, ndmax=0, dtype=object)
+        assert arr.ndim == 0
+        assert arr.shape == ()
+        assert arr.dtype == object
+
+        data = [[1, 2, 3], [4, 5, 6]]
+        arr = np.array(data, ndmax=0, dtype=object)
+        assert arr.ndim == 0
+        assert arr.shape == ()
+        assert arr.dtype == object
+
+        data = [[1, 2, 3], [4, 5]]
+        arr = np.array(data, ndmax=0, dtype=object)
+        assert arr.ndim == 0
+        assert arr.shape == ()
+        assert arr.dtype == object
+
+    def test_ndmax_less_than_ndmin(self):
+        data = [[[1], [2]], [[3], [4]]]
+        with pytest.raises(ValueError, match="ndmin must be <= ndmax"):
+            np.array(data, ndmax=1, ndmin=2)
+
+    def test_ndmax_is_negative(self):
+        data = [1, 2, 3]
+        with pytest.raises(ValueError, match="ndmax must be in the range"):
+            np.array(data, ndmax=-1)
+
+    def test_ndmax_greater_than_NPY_MAXDIMS(self):
+        data = [1, 2, 3]
+        # current NPY_MAXDIMS is 64
+        with pytest.raises(ValueError, match="ndmax must be in the range"):
+            np.array(data, ndmax=65)
+
+    def test_ndmax_less_than_ndim(self):
+        # np.array input bypasses recursive inference, allowing ndim > ndmax validation
+        data = np.array([[1, 2, 3], [4, 5, 6]])
+        with pytest.raises(ValueError, match="object too deep for desired array"):
+            np.array(data, ndmax=1, dtype=object)
+

 class TestStructured:
     def test_subarray_field_access(self):
@@ -1782,11 +1900,9 @@ def _test_cast_from_flexible(self, dtype):
     def test_cast_from_void(self):
         self._test_cast_from_flexible(np.void)

-    @pytest.mark.xfail(reason="See gh-9847")
     def test_cast_from_unicode(self):
         self._test_cast_from_flexible(np.str_)

-    @pytest.mark.xfail(reason="See gh-9847")
     def test_cast_from_bytes(self):
         self._test_cast_from_flexible(np.bytes_)

@@ -2075,6 +2191,7 @@ def check_round(arr, expected, *round_args):
             assert_equal(out, expected)
             assert out is res

+        check_round(np.array([1, 2, 3]), [1, 2, 3])
         check_round(np.array([1.2, 1.5]), [1, 2])
         check_round(np.array(1.5), 2)
         check_round(np.array([12.2, 15.5]), [10, 20], -1)
@@ -2083,6 +2200,20 @@ def check_round(arr, expected, *round_args):
         check_round(np.array([4.5 + 1.5j]), [4 + 2j])
         check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)

+    @pytest.mark.parametrize('dt', ['uint8', int, float, complex])
+    def test_round_copies(self, dt):
+        a = np.arange(3, dtype=dt)
+        assert not np.shares_memory(a.round(), a)
+        assert not np.shares_memory(a.round(decimals=2), a)
+
+        out = np.empty(3, dtype=dt)
+        assert not np.shares_memory(a.round(out=out), a)
+
+        a = np.arange(12).astype(dt).reshape(3, 4).T
+
+        assert a.flags.f_contiguous
+        assert np.round(a).flags.f_contiguous
+
     def test_squeeze(self):
         a = np.array([[[1], [2], [3]]])
         assert_equal(a.squeeze(), [1, 2, 3])
@@ -2118,7 +2249,7 @@ def test_sort(self):
         with
assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` and keyword parameters can't be provided at the same time" ): np.sort(a, kind="stable", stable=True) @@ -2274,8 +2405,7 @@ def test_sort_axis(self): def test_sort_size_0(self): # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array sort with axis={axis}' assert_equal(np.sort(a, axis=axis), a, msg) @@ -2378,6 +2508,20 @@ def test__deepcopy__(self, dtype): with pytest.raises(AssertionError): assert_array_equal(a, b) + def test__deepcopy___void_scalar(self): + # see comments in gh-29643 + value = np.void('Rex', dtype=[('name', 'U10')]) + value_deepcopy = value.__deepcopy__(None) + value[0] = None + assert value_deepcopy[0] == 'Rex' + + @pytest.mark.parametrize("sctype", [np.int64, np.float32, np.float64]) + def test__deepcopy__scalar(self, sctype): + # test optimization from gh-29656 + value = sctype(1.1) + value_deepcopy = value.__deepcopy__(None) + assert value is value_deepcopy + def test__deepcopy__catches_failure(self): class MyObj: def __deepcopy__(self, *args, **kwargs): @@ -2536,8 +2680,7 @@ def test_argsort(self): assert_equal(a.copy().argsort(), c) # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array argsort with axis={axis}' assert_equal(np.argsort(a, axis=axis), @@ -2563,7 +2706,7 @@ def test_argsort(self): with assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` and keyword parameters can't be provided at the same time" ): np.argsort(a, kind="stable", stable=True) @@ -2854,8 +2997,7 @@ def test_partition_integer(self): def test_partition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array partition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), a, msg) @@ -2866,8 +3008,7 @@ def test_partition_empty_array(self, kth_dtype): def test_argpartition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array argpartition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), @@ -3363,6 +3504,11 @@ def test_dot(self): @pytest.mark.parametrize("dtype", [np.half, np.double, np.longdouble]) @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") def test_dot_errstate(self, dtype): + # Some dtypes use BLAS for 'dot' operation and + # not all BLAS support floating-point errors. 
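# A minimal illustrative sketch (not from the patch) of the behaviour this
# guard is about: whether an invalid operation inside np.dot surfaces under
# np.errstate depends on the BLAS the build links against, which is exactly
# what the BLAS_SUPPORTS_FPE flag reports.
import numpy as np

va = np.array([1.0, 1.0])
vb = np.array([-np.inf, np.inf])
with np.errstate(invalid="raise"):
    try:
        np.dot(va, vb)  # -inf + inf produces nan, signalling 'invalid'
    except FloatingPointError:
        pass  # only reached when the BLAS propagates FP exceptions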
+ if not BLAS_SUPPORTS_FPE and dtype == np.double: + pytest.skip("BLAS does not support FPE") + a = np.array([1, 1], dtype=dtype) b = np.array([-np.inf, np.inf], dtype=dtype) @@ -3629,7 +3775,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) @@ -3638,7 +3784,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel(order='K'), np.arange(2**3)) @@ -3651,7 +3797,7 @@ def test_ravel(self): # 1-element tidy strides test: a = np.array([[1]]) - a.strides = (123, 432) + a = stride_tricks.as_strided(a, strides=(123, 432)) if np.ones(1).strides == (8,): assert_(np.may_share_memory(a.ravel('K'), a)) assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) @@ -3692,6 +3838,18 @@ class ArraySubclass(np.ndarray): assert_(isinstance(a.ravel('A'), ArraySubclass)) assert_(isinstance(a.ravel('K'), ArraySubclass)) + @pytest.mark.parametrize("shape", [(3, 224, 224), (8, 512, 512)]) + def test_tobytes_no_copy_fastpath(self, shape): + # Test correctness of non-contiguous paths for `tobytes` + rng = np.random.default_rng(0) + arr = rng.standard_normal(shape, dtype=np.float32) + noncontig = arr.transpose(1, 2, 0) + + # correctness + expected = np.ascontiguousarray(noncontig).tobytes() + got = noncontig.tobytes() + assert got == expected + def test_swapaxes(self): a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy() idx = np.indices(a.shape) @@ -3786,21 +3944,10 @@ def test__complex__(self): '?', 'O'] for dt in dtypes: a = np.array(7, dtype=dt) - b = np.array([7], dtype=dt) - c = np.array([[[[[7]]]]], dtype=dt) - msg = f'dtype: {dt}' ap = complex(a) assert_equal(ap, a, msg) - with assert_warns(DeprecationWarning): - bp = complex(b) - assert_equal(bp, b, msg) - - with assert_warns(DeprecationWarning): - cp = complex(c) - assert_equal(cp, c, msg) - def test__complex__should_not_work(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', @@ -3808,7 +3955,11 @@ def test__complex__should_not_work(self): '?', 'O'] for dt in dtypes: a = np.array([1, 2, 3], dtype=dt) + b = np.array([7], dtype=dt) + c = np.array([[[[[7]]]]], dtype=dt) assert_raises(TypeError, complex, a) + assert_raises(TypeError, complex, b) + assert_raises(TypeError, complex, c) dt = np.dtype([('a', 'f8'), ('b', 'i1')]) b = np.array((1.0, 3), dtype=dt) @@ -3821,8 +3972,7 @@ def test__complex__should_not_work(self): assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') - with assert_warns(DeprecationWarning): - assert_raises(TypeError, complex, e) + assert_raises(TypeError, complex, e) class TestCequenceMethods: def test_array_contains(self): @@ -3867,7 +4017,7 @@ def test_inplace(self): # - defer if other has __array_ufunc__ and it is None # or other is not a subclass and has higher array priority # - else, call ufunc - @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy3.{9, 10}-v7.3.13, #24862") + @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy, #24862") def test_ufunc_binop_interaction(self): # Python method name (without underscores) # -> (numpy ufunc, has_in_place_version, preferred_dtype) @@ -4205,6 +4355,13 @@ def pow_for(exp, arr): assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) assert_equal(obj_arr ** 2, 
pow_for(2, obj_arr)) + def test_pow_calls_square_structured_dtype(self): + # gh-29388 + dt = np.dtype([('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2), (3, 4)], dtype=dt) + with pytest.raises(TypeError, match="ufunc 'square' not supported"): + a ** 2 + def test_pos_array_ufunc_override(self): class A(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): @@ -4362,6 +4519,37 @@ def test_intp_sequence_converters_errors(self, converter): # These converters currently convert overflows to a ValueError converter(2**64) + @pytest.mark.parametrize( + "entry_point", + [ + module + item + for item in ("sin", "strings.str_len", "fft._pocketfft_umath.ifft") + for module in ("", "numpy:") + ] + [ + "numpy.strings:str_len", + "functools:reduce", + "functools:reduce.__doc__" + ] + ) + def test_import_entry_point(self, entry_point): + modname, _, items = entry_point.rpartition(":") + if modname: + module = obj = importlib.import_module(modname) + else: + module = np + exp = functools.reduce(getattr, items.split("."), module) + got = _multiarray_tests.npy_import_entry_point(entry_point) + assert got == exp + + @pytest.mark.parametrize( + "entry_point", + ["sin.", "numpy:", "numpy:sin:__call__", "numpy.sin:__call__."] + ) + def test_import_entry_point_errors(self, entry_point): + # Don't really care about precise error. + with pytest.raises((ImportError, AttributeError)): + _multiarray_tests.npy_import_entry_point(entry_point) + class TestSubscripting: def test_test_zero_rank(self): @@ -4467,6 +4655,7 @@ def test_non_contiguous_array(self): assert_equal(len(buffers), 0) assert_equal(non_contiguous_array, depickled_non_contiguous_array) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_roundtrip(self): for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): carray = np.array([[2, 9], [7, 0], [3, 8]]) @@ -4493,18 +4682,24 @@ def _loads(self, obj): # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_int8(self): s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." 
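# For context, a minimal sketch of a present-day pickle round-trip for the
# same array; the hard-coded bytes above are a frozen version-0 payload
# (protocol 2, no version field) kept verbatim so loading old pickles stays
# covered.
import pickle

import numpy as np

fresh = np.array([1, 2, 3, 4], dtype=np.int8)
payload = pickle.dumps(fresh, protocol=2)
restored = pickle.loads(payload)
assert (restored == fresh).all() and restored.dtype == fresh.dtype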
a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_float32(self): s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01 16MB + tmp_filename = normalize_filename(tmp_path, param_filename) d = np.zeros(4 * 1024 ** 2) d.tofile(tmp_filename) assert_equal(os.path.getsize(tmp_filename), d.nbytes) @@ -5592,18 +5822,21 @@ def test_largish_file(self, tmp_filename): d.tofile(f) assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) - def test_io_open_buffered_fromfile(self, x, tmp_filename): + def test_io_open_buffered_fromfile(self, tmp_path, param_filename): # gh-6632 + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=-1) as f: y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_file_position_after_fromfile(self, tmp_filename): + def test_file_position_after_fromfile(self, tmp_path, param_filename): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: with open(tmp_filename, 'wb') as f: @@ -5619,11 +5852,12 @@ def test_file_position_after_fromfile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_file_position_after_tofile(self, tmp_filename): + def test_file_position_after_tofile(self, tmp_path, param_filename): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: err_msg = "%d" % (size,) @@ -5644,8 +5878,9 @@ def test_file_position_after_tofile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_load_object_array_fromfile(self, tmp_filename): + def test_load_object_array_fromfile(self, tmp_path, param_filename): # gh-12300 + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'w') as f: # Ensure we have a file with consistent contents pass @@ -5657,7 +5892,9 @@ def test_load_object_array_fromfile(self, tmp_filename): assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, tmp_filename, dtype=object) - def test_fromfile_offset(self, x, tmp_filename): + def test_fromfile_offset(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) @@ -5691,23 +5928,21 @@ def test_fromfile_offset(self, x, tmp_filename): np.fromfile, tmp_filename, dtype=x.dtype, sep=",", offset=1) - @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") - def test_fromfile_bad_dup(self, x, tmp_filename): + def test_fromfile_bad_dup(self, tmp_path, param_filename, monkeypatch): def dup_str(fd): return 'abc' def dup_bigint(fd): return 2**68 - old_dup = os.dup - try: - with open(tmp_filename, 'wb') as f: - x.tofile(f) - for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): - os.dup = dup - assert_raises(exc, np.fromfile, f) - finally: - os.dup = old_dup + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() + + with open(tmp_filename, 'wb') as f: + x.tofile(f) + for dup, exc in ((dup_str, TypeError), 
(dup_bigint, OSError)): + monkeypatch.setattr(os, "dup", dup) + assert_raises(exc, np.fromfile, f) def _check_from(self, s, value, filename, **kw): if 'sep' not in kw: @@ -5749,38 +5984,44 @@ def test_decimal_comma_separator(): else: assert False, request.param - def test_nan(self, tmp_filename, decimal_sep_localization): + def test_nan(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], tmp_filename, sep=' ') - def test_inf(self, tmp_filename, decimal_sep_localization): + def test_inf(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], tmp_filename, sep=' ') - def test_numbers(self, tmp_filename, decimal_sep_localization): + def test_numbers(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], tmp_filename, sep=' ') - def test_binary(self, tmp_filename): + def test_binary(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', np.array([1, 2, 3, 4]), tmp_filename, dtype=' 0) assert_(issubclass(w[0].category, RuntimeWarning)) @@ -6414,7 +6696,8 @@ def test_empty(self): assert_equal(f(A, axis=axis), np.zeros([])) def test_mean_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * mat.shape[axis] @@ -6472,7 +6755,8 @@ def test_mean_where(self): assert_equal(np.mean(a, where=False), np.nan) def test_var_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6486,9 +6770,10 @@ def test_var_values(self): ('clongdouble', 7), )) def test_var_complex_values(self, complex_dtype, ndec): + _, cmat, _ = self._create_data() # Test fast-paths for every builtin complex type for axis in [0, 1, None]: - mat = self.cmat.copy().astype(complex_dtype) + mat = cmat.copy().astype(complex_dtype) msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) tgt = msqr - mean * mean.conjugate() @@ -6498,7 +6783,8 @@ def test_var_complex_values(self, complex_dtype, ndec): def test_var_dimensions(self): # _var paths for complex number introduce additions on views that # increase dimensions. 
Ensure this generalizes to higher dims - mat = np.stack([self.cmat] * 3) + _, cmat, _ = self._create_data() + mat = np.stack([cmat] * 3) for axis in [0, 1, 2, -1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6509,7 +6795,8 @@ def test_var_dimensions(self): def test_var_complex_byteorder(self): # Test that var fast-path does not cause failures for complex arrays # with non-native byteorder - cmat = self.cmat.copy().astype('complex128') + _, cmat, _ = self._create_data() + cmat = cmat.copy().astype('complex128') cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) assert_almost_equal(cmat.var(), cmat_swapped.var()) @@ -6557,7 +6844,8 @@ def test_var_where(self): assert_equal(np.var(a, where=False), np.nan) def test_std_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: tgt = np.sqrt(_var(mat, axis=axis)) res = _std(mat, axis=axis) @@ -6688,61 +6976,63 @@ def test_vdot_uncontiguous(self): class TestDot: - def setup_method(self): - np.random.seed(128) - self.A = np.random.rand(4, 2) - self.b1 = np.random.rand(2, 1) - self.b2 = np.random.rand(2) - self.b3 = np.random.rand(1, 2) - self.b4 = np.random.rand(4) - self.N = 7 + N = 7 + + def _create_data(self): + rng = np.random.RandomState(128) + A = rng.random((4, 2)) + b1 = rng.random((2, 1)) + b2 = rng.random(2) + b3 = rng.random((1, 2)) + b4 = rng.random(4) + return A, b1, b2, b3, b4 def test_dotmatmat(self): - A = self.A + A, _, _, _, _ = self._create_data() res = np.dot(A.transpose(), A) tgt = np.array([[1.45046013, 0.86323640], [0.86323640, 0.84934569]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec(self): - A, b1 = self.A, self.b1 + A, b1, _, _, _ = self._create_data() res = np.dot(A, b1) tgt = np.array([[0.32114320], [0.04889721], [0.15696029], [0.33612621]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec2(self): - A, b2 = self.A, self.b2 + A, _, b2, _, _ = self._create_data() res = np.dot(A, b2) tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat(self): - A, b4 = self.A, self.b4 + A, _, _, _, b4 = self._create_data() res = np.dot(b4, A) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat2(self): - b3, A = self.b3, self.A + A, _, _, b3, _ = self._create_data() res = np.dot(b3, A.transpose()) tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat3(self): - A, b4 = self.A, self.b4 + A, _, _, _, b4 = self._create_data() res = np.dot(A.transpose(), b4) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecouter(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b1, b3) tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecinner(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b3, b1) tgt = np.array([[0.23129668]]) assert_almost_equal(res, tgt, decimal=self.N) @@ -6762,17 +7052,17 @@ def test_dotcolumnvect2(self): assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar(self): - np.random.seed(100) - b1 = np.random.rand(1, 1) - b2 = np.random.rand(1, 4) + rng = np.random.RandomState(100) + b1 = rng.random((1, 1)) + b2 = rng.random((1, 4)) res = 
np.dot(b1, b2) tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar2(self): - np.random.seed(100) - b1 = np.random.rand(4, 1) - b2 = np.random.rand(1, 1) + rng = np.random.RandomState(100) + b1 = rng.random((4, 1)) + b2 = rng.random((1, 1)) res = np.dot(b1, b2) tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]]) assert_almost_equal(res, tgt, decimal=self.N) @@ -6975,6 +7265,7 @@ def assert_dot_close(A, X, desired): @pytest.mark.slow @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @requires_memory(free_bytes=18e9) # complex case needs 18GiB+ + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_huge_vectordot(self, dtype): # Large vector multiplications are chunked with 32bit BLAS # Test that the chunking does the right thing, see also gh-22262 @@ -7241,8 +7532,8 @@ def test_out_arg(self): out = np.zeros((5, 2), dtype=np.complex128) c = self.matmul(a, b, out=out) assert_(c is out) - with suppress_warnings() as sup: - sup.filter(ComplexWarning, '') + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) c = c.astype(tgt.dtype) assert_array_equal(c, tgt) @@ -7317,6 +7608,34 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) + # issue 29164 with extra checks + @pytest.mark.parametrize('dtype', ( + np.float32, np.float64, np.complex64, np.complex128 + )) + def test_dot_equivalent_matrix_matrix_blastypes(self, dtype): + modes = list(itertools.product(['C', 'F'], [True, False])) + + def apply_mode(m, mode): + order, is_contiguous = mode + if is_contiguous: + return m.copy() if order == 'C' else m.T.copy().T + + retval = np.zeros( + (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order + )[::2, ::2] + retval[...] 
= m + return retval + + is_complex = np.issubdtype(dtype, np.complexfloating) + m1 = self.m1.astype(dtype) + (1j if is_complex else 0) + m2 = self.m2.astype(dtype) + (1j if is_complex else 0) + dot_res = np.dot(m1, m2) + mo = np.zeros_like(dot_res) + + for mode in itertools.product(*[modes] * 3): + m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] + assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) + def test_matmul_object(self): import fractions @@ -7561,23 +7880,27 @@ def test_3d_tensor(self): class TestChoose: - def setup_method(self): - self.x = 2 * np.ones((3,), dtype=int) - self.y = 3 * np.ones((3,), dtype=int) - self.x2 = 2 * np.ones((2, 3), dtype=int) - self.y2 = 3 * np.ones((2, 3), dtype=int) - self.ind = [0, 0, 1] + def _create_data(self): + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) + ind = [0, 0, 1] + return x, y, x2, y2, ind def test_basic(self): - A = np.choose(self.ind, (self.x, self.y)) + x, y, _, _, ind = self._create_data() + A = np.choose(ind, (x, y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): - A = np.choose(self.ind, (self.x2, self.y2)) + _, _, x2, y2, ind = self._create_data() + A = np.choose(ind, (x2, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): - A = np.choose(self.ind, (self.x, self.y2)) + x, _, _, y2, ind = self._create_data() + A = np.choose(ind, (x, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) @pytest.mark.parametrize("ops", @@ -7607,38 +7930,43 @@ def test_dimension_and_args_limit(self): class TestRepeat: - def setup_method(self): - self.m = np.array([1, 2, 3, 4, 5, 6]) - self.m_rect = self.m.reshape((2, 3)) + def _create_data(self): + m = np.array([1, 2, 3, 4, 5, 6]) + m_rect = m.reshape((2, 3)) + return m, m_rect def test_basic(self): - A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + m, _ = self._create_data() + A = np.repeat(m, [1, 3, 2, 1, 1, 2]) assert_equal(A, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6]) def test_broadcast1(self): - A = np.repeat(self.m, 2) + m, _ = self._create_data() + A = np.repeat(m, 2) assert_equal(A, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) def test_axis_spec(self): - A = np.repeat(self.m_rect, [2, 1], axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, [2, 1], axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6]]) - A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + A = np.repeat(m_rect, [1, 3, 2], axis=1) assert_equal(A, [[1, 2, 2, 2, 3, 3], [4, 5, 5, 5, 6, 6]]) def test_broadcast2(self): - A = np.repeat(self.m_rect, 2, axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, 2, axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) - A = np.repeat(self.m_rect, 2, axis=1) + A = np.repeat(m_rect, 2, axis=1) assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]) @@ -8302,6 +8630,7 @@ def test_padding(self): x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) + @pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") def test_reference_leak(self): if HAS_REFCOUNT: count_1 = sys.getrefcount(np._core._internal) @@ -8332,10 +8661,13 @@ def test_padded_struct_array(self): self._check_roundtrip(x3) @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.") - def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): # noqa: B008 + def test_relaxed_strides(self, c=stride_tricks.as_strided( # noqa: B008 + np.ones((1, 10, 10), dtype='i8'), # noqa: B008 + strides=(-1, 80, 8) + 
) + ): # Note: c defined as parameter so that it is persistent and leak # checks will notice gh-16934 (buffer info cache leak). - c.strides = (-1, 80, 8) # strides need to be fixed at export assert_(memoryview(c).strides == (800, 80, 8)) @@ -8420,6 +8752,7 @@ class foo(ctypes.Structure): assert_equal(arr['a'], 3) @pytest.mark.parametrize("obj", [np.ones(3), np.ones(1, dtype="i,i")[()]]) + @pytest.mark.thread_unsafe(reason="_multiarray_tests used memoryview, which is thread-unsafe") def test_error_if_stored_buffer_info_is_corrupted(self, obj): """ If a user extends a NumPy array before 1.20 and then runs it @@ -8758,8 +9091,9 @@ def test_multiarray_writable_attributes_deletion(self): # ticket #2046, should not seqfault, raise AttributeError a = np.ones(2) attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "Assigning the 'data' attribute") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "Assigning the 'data' attribute", DeprecationWarning) for s in attr: assert_raises(AttributeError, delattr, a, s) @@ -8812,7 +9146,10 @@ def __array_interface__(self): (f, {'strides': ()}, 0.5), (f, {'strides': (2,)}, ValueError), (f, {'strides': 16}, TypeError), + # This fails due to going into the buffer protocol path + (f, {'data': None, 'shape': ()}, TypeError), ]) + @pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") def test_scalar_interface(self, val, iface, expected): # Test scalar coercion within the array interface self.f.iface = {'typestr': 'f8'} @@ -8830,13 +9167,50 @@ def test_scalar_interface(self, val, iface, expected): post_cnt = sys.getrefcount(np.dtype('f8')) assert_equal(pre_cnt, post_cnt) -def test_interface_no_shape(): + +def test_interface_empty_shape(): class ArrayLike: array = np.array(1) __array_interface__ = array.__array_interface__ assert_equal(np.array(ArrayLike()), 1) +def test_interface_no_shape_error(): + class ArrayLike: + __array_interface__ = {"data": None, "typestr": "f8"} + + with pytest.raises(ValueError, match="Missing __array_interface__ shape"): + np.array(ArrayLike()) + + +@pytest.mark.parametrize("iface", [ + {"typestr": "f8", "shape": (0, 1)}, + {"typestr": "(0,)f8,", "shape": (1, 3)}, +]) +def test_interface_nullptr(iface): + iface.update({"data": (0, True)}) + + class ArrayLike: + __array_interface__ = iface + + arr = np.asarray(ArrayLike()) + # Note, we currently set the base anyway, but we do an allocation + # (because NumPy doesn't like NULL data pointers everywhere). 
+ assert arr.shape == iface["shape"] + assert arr.dtype == np.dtype(iface["typestr"]) + assert arr.base is not None + assert arr.flags.owndata + + +def test_interface_nullptr_size_check(): + # Note that prior to NumPy 2.4 the below took the scalar path (if shape had size 1) + class ArrayLike: + __array_interface__ = {"data": (0, True), "typestr": "f8", "shape": ()} + + with pytest.raises(ValueError, match="data is NULL but array contains data"): + np.array(ArrayLike()) + + def test_array_interface_itemsize(): # See gh-6361 my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], @@ -8977,6 +9351,7 @@ def test_to_bool_scalar(self): assert_equal(bool(np.array([True])), True) assert_equal(bool(np.array([[42]])), True) + @requires_deep_recursion def test_to_bool_scalar_not_convertible(self): class NotConvertible: @@ -8985,11 +9360,6 @@ def __bool__(self): assert_raises(NotImplementedError, bool, np.array(NotConvertible())) assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) - if IS_PYSTON: - pytest.skip("Pyston disables recursion checking") - if IS_WASM: - pytest.skip("Pyodide/WASM has limited stack size") - self_containing = np.array([None]) self_containing[0] = self_containing @@ -9013,10 +9383,8 @@ def test_to_int_scalar(self): int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array(0)), 0) - with assert_warns(DeprecationWarning): - assert_equal(int_func(np.array([1])), 1) - with assert_warns(DeprecationWarning): - assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1])) + assert_raises(TypeError, int_func, np.array([[42]])) assert_raises(TypeError, int_func, np.array([1, 2])) # gh-9972 @@ -9029,9 +9397,24 @@ def __int__(self): raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) - with assert_warns(DeprecationWarning): - assert_raises(NotImplementedError, - int_func, np.array([NotConvertible()])) + assert_raises(TypeError, + int_func, np.array([NotConvertible()])) + + def test_to_float_scalar(self): + float_funcs = (float, lambda x: x.__float__()) + for float_func in float_funcs: + assert_equal(float_func(np.array(0)), 0.0) + assert_equal(float_func(np.array(1.0, np.float64)), 1.0) + assert_raises(TypeError, float_func, np.array([2])) + assert_raises(TypeError, float_func, np.array([3.14])) + assert_raises(TypeError, float_func, np.array([[4.0]])) + + assert_equal(5.0, float_func(np.array('5'))) + assert_equal(5.1, float_func(np.array('5.1'))) + assert_equal(6.0, float_func(np.bytes_(b'6'))) + assert_equal(6.1, float_func(np.bytes_(b'6.1'))) + assert_equal(7.0, float_func(np.str_('7'))) + assert_equal(7.1, float_func(np.str_('7.1'))) class TestWhere: @@ -9440,9 +9823,6 @@ def test_1d_format(self): assert_raises(TypeError, '{:30}'.format, a) -from numpy.testing import IS_PYPY - - class TestCTypes: def test_ctypes_is_available(self): @@ -9451,6 +9831,7 @@ def test_ctypes_is_available(self): assert_equal(ctypes, test_arr.ctypes._ctypes) assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + @pytest.mark.thread_unsafe(reason="modifies global module state") def test_ctypes_is_not_available(self): from numpy._core import _internal _internal.ctypes = None @@ -9467,6 +9848,7 @@ def _make_readonly(x): x.flags.writeable = False return x + @pytest.mark.thread_unsafe(reason="calls gc.collect()") @pytest.mark.parametrize('arr', [ np.array([1, 2, 3]), np.array([['one', 'two'], ['three', 'four']]), @@ -9515,6 +9897,7 @@ def 
test_ctypes_data_as_holds_reference(self, arr): break_cycles() assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_ctypes_as_parameter_holds_reference(self): arr = np.array([None]).copy() @@ -9619,12 +10002,10 @@ def test_view_assign(self): @pytest.mark.leaks_references( reason="increments self in dealloc; ignore since deprecated path.") def test_dealloc_warning(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - arr = np.arange(9).reshape(3, 3) - v = arr.T + arr = np.arange(9).reshape(3, 3) + v = arr.T + with pytest.warns(RuntimeWarning): _multiarray_tests.npy_abuse_writebackifcopy(v) - assert len(sup.log) == 1 def test_view_discard_refcount(self): from numpy._core._multiarray_tests import ( @@ -9840,6 +10221,7 @@ def __array_finalize__(self, obj): with pytest.raises(RuntimeError, match="boohoo!"): np.arange(10).view(BadAttributeArray) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_lifetime_on_error(self): # gh-11237 class RaisesInFinalize(np.ndarray): @@ -10144,8 +10526,8 @@ def test_strided_loop_alignments(self): xf128 = _aligned_zeros(3, np.longdouble, align=align) # test casting, both to and from misaligned - with suppress_warnings() as sup: - sup.filter(ComplexWarning, "Casting complex values") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "Casting complex values", ComplexWarning) xc64.astype('f8') xf64.astype(np.complex64) test = xc64 + xf64 @@ -10182,7 +10564,6 @@ def test_getfield(): pytest.raises(ValueError, a.getfield, 'uint8', 16) pytest.raises(ValueError, a.getfield, 'uint64', 0) - class TestViewDtype: """ Verify that making a view of a non-contiguous array works as expected. @@ -10360,6 +10741,7 @@ def test_argsort_largearrays(dtype): assert_arg_sorted(arr, np.argsort(arr, kind='quick')) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") def test_gh_22683(): b = 777.68760986 a = np.array([b] * 10000, dtype=object) @@ -10534,3 +10916,89 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): a = np.array([1], dtype=dtype) b = a.__array__(None) assert_array_equal(a, b, strict=True) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestTextSignatures: + @pytest.mark.parametrize( + "methodname", + [ + "__array__", "__array_finalize__", "__array_function__", "__array_ufunc__", + "__array_wrap__", "__complex__", "__copy__", "__deepcopy__", + "__reduce__", "__reduce_ex__", "__setstate__", + "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", + "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", + "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", + "getfield", "item", "max", "mean", "min", "nonzero", "prod", "put", "ravel", + "repeat", "reshape", "resize", "round", "searchsorted", "setfield", + "setflags", "sort", "partition", "squeeze", "std", "sum", "swapaxes", + "take", "tofile", "tolist", "tobytes", "trace", "transpose", "var", "view", + "__array_namespace__", "__dlpack__", "__dlpack_device__", "to_device", + ], + ) + def test_array_method_signatures(self, methodname: str): + method = getattr(np.ndarray, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could 
not get signature for np.ndarray.{methodname}: {e}")
+
+        assert "self" in sig.parameters
+        assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY
+
+    @pytest.mark.parametrize("func", [np.empty_like, np.concatenate])
+    def test_c_func_dispatcher_text_signature(self, func):
+        text_sig = func.__wrapped__.__text_signature__
+        assert text_sig.startswith("(") and text_sig.endswith(")")
+
+        sig = inspect.signature(func)
+        assert sig == inspect.signature(func.__wrapped__)
+        assert not hasattr(func, "__signature__")
+
+        with pytest.raises(ValueError):
+            inspect.signature(func, follow_wrapped=False)
+
+    @pytest.mark.parametrize(
+        "func",
+        [
+            np.inner, np.where, np.lexsort, np.can_cast, np.min_scalar_type,
+            np.result_type, np.dot, np.vdot, np.bincount, np.ravel_multi_index,
+            np.unravel_index, np.copyto, np.putmask, np.packbits, np.unpackbits,
+            np.shares_memory, np.may_share_memory, np.is_busday, np.busday_offset,
+            np.busday_count, np.datetime_as_string,
+        ],
+    )
+    def test_c_func_dispatcher_signature(self, func):
+        sig = inspect.signature(func)
+
+        assert hasattr(func, "__signature__")
+        assert sig == func.__signature__
+        assert sig.parameters
+
+    @pytest.mark.parametrize(("func", "parameter_names"), [
+        (np.arange, ("start_or_stop", "stop", "step", "dtype", "device", "like")),
+        (np.busdaycalendar, ("weekmask", "holidays")),
+        (np.char.compare_chararrays, ("a1", "a2", "cmp", "rstrip")),
+        (np.datetime_data, ("dtype",)),
+        (np.from_dlpack, ("x", "device", "copy")),
+        (np.frombuffer, ("buffer", "dtype", "count", "offset", "like")),
+        (np.fromfile, ("file", "dtype", "count", "sep", "offset", "like")),
+        (np.fromiter, ("iter", "dtype", "count", "like")),
+        (np.frompyfunc, ("func", "nin", "nout", "kwargs")),
+        (np.fromstring, ("string", "dtype", "count", "sep", "like")),
+        (np.nested_iters, (
+            "op", "axes", "flags", "op_flags", "op_dtypes", "order", "casting",
+            "buffersize",
+        )),
+        (np.promote_types, ("type1", "type2")),
+    ])
+    def test_add_newdoc_function_signature(self, func, parameter_names):
+        assert not hasattr(func, "__signature__")
+        assert getattr(func, "__text_signature__", None)
+
+        sig = inspect.signature(func)
+        assert sig.parameters
+        assert tuple(sig.parameters) == parameter_names
diff --git a/numpy/_core/tests/test_multiprocessing.py b/numpy/_core/tests/test_multiprocessing.py
new file mode 100644
index 000000000000..2c5c2fcfb8ed
--- /dev/null
+++ b/numpy/_core/tests/test_multiprocessing.py
@@ -0,0 +1,51 @@
+import pytest
+
+import numpy as np
+from numpy.testing import IS_WASM
+
+pytestmark = pytest.mark.thread_unsafe(
+    reason="tests in this module are explicitly multi-processed"
+)
+
+def bool_array_writer(shm_name, n):
+    # writer routine for test_read_write_bool_array
+    import time
+    from multiprocessing import shared_memory
+    shm = shared_memory.SharedMemory(name=shm_name)
+    arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf)
+    for i in range(n):
+        arr[i] = True
+        time.sleep(0.00001)
+
+def bool_array_reader(shm_name, n):
+    # reader routine for test_read_write_bool_array
+    from multiprocessing import shared_memory
+    shm = shared_memory.SharedMemory(name=shm_name)
+    arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf)
+    for i in range(n):
+        while not arr[i]:
+            pass
+
+@pytest.mark.skipif(IS_WASM,
+                    reason="WASM does not support _posixshmem")
+def test_read_write_bool_array():
+    # See: gh-30389
+    #
+    # Prior to Python 3.13, boolean scalar singletons (np.True_ / np.False_) were
+    # regular reference-counted objects.
Due to the double evaluation in + # PyArrayScalar_RETURN_BOOL_FROM_LONG, concurrent reads and writes of a + # boolean array could corrupt their refcounts, potentially causing a crash + # (e.g., `free(): invalid pointer`). + # + # This test creates a multi-process race between a writer and a reader to + # ensure that NumPy does not exhibit such failures. + from concurrent.futures import ProcessPoolExecutor + from multiprocessing import shared_memory + n = 10000 + shm = shared_memory.SharedMemory(create=True, size=n) + with ProcessPoolExecutor(max_workers=2) as executor: + f_writer = executor.submit(bool_array_writer, shm.name, n) + f_reader = executor.submit(bool_array_reader, shm.name, n) + shm.unlink() + f_writer.result() + f_reader.result() diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 09f907561ae5..44b2c34cd68b 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -1,19 +1,22 @@ import concurrent.futures -import string import threading import pytest import numpy as np from numpy._core import _rational_tests +from numpy._core.tests.test_stringdtype import random_unicode_string_list from numpy.testing import IS_64BIT, IS_WASM from numpy.testing._private.utils import run_threaded if IS_WASM: pytest.skip(allow_module_level=True, reason="no threading support in wasm") +pytestmark = pytest.mark.thread_unsafe( + reason="tests in this module are already explicitly multi-threaded" +) -def test_parallel_randomstate_creation(): +def test_parallel_randomstate(): # if the coercion cache is enabled and not thread-safe, creating # RandomState instances simultaneously leads to a data race def func(seed): @@ -21,16 +24,26 @@ def func(seed): run_threaded(func, 500, pass_count=True) + # seeding and setting state shouldn't race with generating RNG samples + rng = np.random.RandomState() + + def func(seed): + base_rng = np.random.RandomState(seed) + state = base_rng.get_state() + rng.seed(seed) + rng.random() + rng.set_state(state) + + run_threaded(func, 8, pass_count=True) def test_parallel_ufunc_execution(): # if the loop data cache or dispatch cache are not thread-safe # computing ufuncs simultaneously in multiple threads leads # to a data race that causes crashes or spurious exceptions - def func(): - arr = np.random.random((25,)) - np.isnan(arr) - - run_threaded(func, 500) + for dtype in [np.float32, np.float64, np.int32]: + for op in [np.random.random((25,)).astype(dtype), dtype(25)]: + for ufunc in [np.isnan, np.sin]: + run_threaded(lambda: ufunc(op), 500) # see gh-26690 NUM_THREADS = 50 @@ -120,6 +133,8 @@ def legacy_125(): task1.start() task2.start() + task1.join() + task2.join() def test_parallel_reduction(): @@ -218,16 +233,12 @@ def func(arr): assert arr.dtype is dt -def test_stringdtype_multithreaded_access_and_mutation( - dtype, random_string_list): +def test_stringdtype_multithreaded_access_and_mutation(): # this test uses an RNG and may crash or cause deadlocks if there is a # threading bug rng = np.random.default_rng(0x4D3D3D3) - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = rng.choice(chars, size=100 * 10, replace=True) - random_string_list = ret.view("U100") + string_list = random_unicode_string_list() def func(arr): rnd = rng.random() @@ -247,10 +258,10 @@ def func(arr): else: np.multiply(arr, np.int64(2), out=arr) else: - arr[:] = random_string_list + arr[:] = string_list with concurrent.futures.ThreadPoolExecutor(max_workers=8) as 
tpe:
-        arr = np.array(random_string_list, dtype=dtype)
+        arr = np.array(string_list, dtype="T")
         futures = [tpe.submit(func, arr) for _ in range(500)]

         for f in futures:
@@ -272,12 +283,15 @@ def closure(b):
 def test_nonzero(dtype):
     # See: gh-28361
     #
-    # np.nonzero uses np.count_nonzero to determine the size of the output array
-    # In a second pass the indices of the non-zero elements are determined, but they can have changed
+    # np.nonzero uses np.count_nonzero to determine the size of the output
+    # array. In a second pass the indices of the non-zero elements are
+    # determined, but they can have changed
     #
-    # This test triggers a data race which is suppressed in the TSAN CI. The test is to ensure
-    # np.nonzero does not generate a segmentation fault
+    # This test triggers a data race which is suppressed in the TSAN CI.
+    # The test is to ensure np.nonzero does not generate a segmentation fault
     x = np.random.randint(4, size=100).astype(dtype)
+    expected_warning = ('number of non-zero array elements changed'
+                        ' during function execution')

     def func(index):
         for _ in range(10):
@@ -287,6 +301,53 @@ def func(index):
             try:
                 _ = np.nonzero(x)
             except RuntimeError as ex:
-                assert 'number of non-zero array elements changed during function execution' in str(ex)
+                assert expected_warning in str(ex)

     run_threaded(func, max_workers=10, pass_count=True, outer_iterations=5)
+
+
+# These are all implemented using PySequence_Fast, which needs locking to be safe
+def np_broadcast(arrs):
+    for i in range(100):
+        np.broadcast(arrs)
+
+def create_array(arrs):
+    for i in range(100):
+        np.array(arrs)
+
+def create_nditer(arrs):
+    for i in range(1000):
+        np.nditer(arrs)
+
+@pytest.mark.parametrize("kernel", (np_broadcast, create_array, create_nditer))
+def test_arg_locking(kernel):
+    # should complete without failing or generating an error about an array size
+    # changing
+
+    b = threading.Barrier(5)
+    done = 0
+    arrs = []
+
+    def read_arrs():
+        nonlocal done
+        b.wait()
+        try:
+            kernel(arrs)
+        finally:
+            done += 1
+
+    def mutate_list():
+        b.wait()
+        while done < 4:
+            if len(arrs) > 10:
+                arrs.pop(0)
+            elif len(arrs) <= 10:
+                arrs.extend([np.array([1, 2, 3]) for _ in range(1000)])
+
+    arrs = [np.array([1, 2, 3]) for _ in range(1000)]
+
+    tasks = [threading.Thread(target=read_arrs) for _ in range(4)]
+    tasks.append(threading.Thread(target=mutate_list))
+
+    [t.start() for t in tasks]
+    [t.join() for t in tasks]
diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py
index ec28e48c5046..d2fc69a03b5f 100644
--- a/numpy/_core/tests/test_nditer.py
+++ b/numpy/_core/tests/test_nditer.py
@@ -1,21 +1,24 @@
+import inspect
 import subprocess
 import sys
 import textwrap
+import warnings

-import numpy._core._multiarray_tests as _multiarray_tests
 import pytest

 import numpy as np
+import numpy._core._multiarray_tests as _multiarray_tests
 import numpy._core.umath as ncu
 from numpy import all, arange, array, nditer
 from numpy.testing import (
     HAS_REFCOUNT,
+    IS_64BIT,
+    IS_PYPY,
     IS_WASM,
     assert_,
     assert_array_equal,
     assert_equal,
     assert_raises,
-    suppress_warnings,
 )
 from numpy.testing._private.utils import requires_memory
@@ -858,7 +861,7 @@ def test_iter_nbo_align_contig():
     # Unaligned input
     a = np.zeros((6 * 4 + 1,), dtype='i1')[1:]
-    a.dtype = 'f4'
+    a = a.view('f4')
     a[:] = np.arange(6, dtype='f4')
     assert_(not a.flags.aligned)
     # Without 'aligned', shouldn't copy
@@ -1803,7 +1806,7 @@ def test_iter_buffering():
     arrays.append(np.arange(10, dtype='f4'))
     # Unaligned array
     a = np.zeros((4 * 16 + 1,),
dtype='i1')[1:] - a.dtype = 'i4' + a = a.view('i4') a[:] = np.arange(16, dtype='i4') arrays.append(a) # 4-D F-order array @@ -1899,8 +1902,8 @@ def test_iter_buffered_cast_byteswapped(): assert_equal(a, 2 * np.arange(10, dtype='f4')) - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) a = np.arange(10, dtype='f8') a = a.view(a.dtype.newbyteorder()).byteswap() @@ -2895,7 +2898,7 @@ def _is_buffered(iterator): return True return False -@pytest.mark.parametrize("a", +@pytest.mark.parametrize("arrs", [np.zeros((3,), dtype='f8'), np.zeros((9876, 3 * 5), dtype='f8')[::2, :], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], @@ -2904,10 +2907,11 @@ def _is_buffered(iterator): np.zeros((9,), dtype='f8')[::3], np.zeros((9876, 3 * 10), dtype='f8')[::2, ::5], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) -def test_iter_writemasked(a): +def test_iter_writemasked(arrs): # Note, the slicing above is to ensure that nditer cannot combine multiple # axes into one. The repetition is just to make things a bit more # interesting. + a = arrs.copy() shape = a.shape reps = shape[-1] // 3 msk = np.empty(shape, dtype=bool) @@ -3202,6 +3206,13 @@ def test_iter_too_large_with_multiindex(): with assert_raises(ValueError): _multiarray_tests.test_nditer_too_large(arrays, i * 2 + 1, mode) + +def test_invalid_call_of_enable_external_loop(): + with pytest.raises(ValueError, + match='Iterator flag EXTERNAL_LOOP cannot be used'): + np.nditer(([[1], [2]], [3, 4]), ['multi_index']).enable_external_loop() + + def test_writebacks(): a = np.arange(6, dtype='f4') au = a.byteswap() @@ -3310,13 +3321,10 @@ def test_warn_noclose(): a = np.arange(6, dtype='f4') au = a.byteswap() au = au.view(au.dtype.newbyteorder()) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): it = np.nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) + casting='equiv', op_dtypes=[np.dtype('f4')]) del it - assert len(sup.log) == 1 - @pytest.mark.parametrize(["in_dtype", "buf_dtype"], [("i", "O"), ("O", "i"), # most simple cases @@ -3404,7 +3412,9 @@ def test_arbitrary_number_of_ops_nested(): @pytest.mark.slow +@pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") @requires_memory(9 * np.iinfo(np.intc).max) +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_arbitrary_number_of_ops_error(): # A different error may happen for more than integer operands, but that # is too large to test nicely. @@ -3417,6 +3427,7 @@ def test_arbitrary_number_of_ops_error(): np.nested_iters(args, [[0], []]) +@pytest.mark.thread_unsafe(reason="capfd is thread-unsafe") def test_debug_print(capfd): """ Matches the expected output of a debug print with the actual output. 
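# A minimal sketch of the mechanism the signature tests below rely on: for C
# callables, CPython reconstructs an inspect.Signature from an embedded
# __text_signature__, so once one is set on the nditer type the constructor
# becomes introspectable (the parameter names shown are indicative only).
import inspect

import numpy as np

sig = inspect.signature(np.nditer)  # derived from nditer.__text_signature__
assert "self" not in sig.parameters  # a constructor signature, not a method's
print(list(sig.parameters)[:3])  # e.g. ['op', 'flags', 'op_flags']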
@@ -3496,3 +3507,27 @@ def test_debug_print(capfd): # The actual output may have additional pointers listed that are # stripped from the example output: assert res_line.startswith(expected_line.strip()) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +def test_signature_constructor(): + sig = inspect.signature(np.nditer) + + assert sig.parameters + assert "self" not in sig.parameters + assert "args" not in sig.parameters + assert "kwargs" not in sig.parameters + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "method", + [fn for name, fn in vars(np.nditer).items() if callable(fn) and name[0] != "_"], +) +def test_signature_methods(method): + sig = inspect.signature(method) + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 8e786bf13d9e..9e71b7c6b1b8 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1,3 +1,4 @@ +import inspect import itertools import math import platform @@ -6,19 +7,19 @@ from decimal import Decimal import pytest -from hypothesis import given -from hypothesis import strategies as st +from hypothesis import given, strategies as st from hypothesis.extra import numpy as hynp -from numpy._core._rational_tests import rational import numpy as np from numpy import ma from numpy._core import sctypes +from numpy._core._rational_tests import rational from numpy._core.numerictypes import obj2sctype from numpy.exceptions import AxisError from numpy.random import rand, randint, randn from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, IS_WASM, assert_, assert_almost_equal, @@ -79,6 +80,13 @@ def test_negative_resize(self): with pytest.raises(ValueError, match=r"negative"): np.resize(A, new_shape=new_shape) + def test_unsigned_resize(self): + # ensure unsigned integer sizes don't lead to underflows + for dt_pair in [(np.int32, np.uint32), (np.int64, np.uint64)]: + arr = np.array([[23, 95], [66, 37]]) + assert_array_equal(np.resize(arr, dt_pair[0](1)), + np.resize(arr, dt_pair[1](1))) + def test_subclass(self): class MyArray(np.ndarray): __array_priority__ = 1. @@ -177,12 +185,6 @@ def test_reshape_shape_arg(self): shape = (3, 4) expected = arr.reshape(shape) - with pytest.raises( - TypeError, - match="You cannot specify 'newshape' and 'shape' " - "arguments at the same time." 
- ): - np.reshape(arr, shape=shape, newshape=shape) with pytest.raises( TypeError, match=r"reshape\(\) missing 1 required positional " @@ -195,9 +197,6 @@ def test_reshape_shape_arg(self): assert_equal(np.reshape(arr, shape, "C"), expected) assert_equal(np.reshape(arr, shape=shape), expected) assert_equal(np.reshape(arr, shape=shape, order="C"), expected) - with pytest.warns(DeprecationWarning): - actual = np.reshape(arr, newshape=shape) - assert_equal(actual, expected) def test_reshape_copy_arg(self): arr = np.arange(24).reshape(2, 3, 4) @@ -285,6 +284,10 @@ def test_size(self): assert_(np.size(A) == 6) assert_(np.size(A, 0) == 2) assert_(np.size(A, 1) == 3) + assert_(np.size(A, ()) == 1) + assert_(np.size(A, (0,)) == 2) + assert_(np.size(A, (1,)) == 3) + assert_(np.size(A, (0, 1)) == 6) def test_squeeze(self): A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] @@ -737,27 +740,29 @@ def test_bitwise_xor(self): class TestBoolArray: - def setup_method(self): + def _create_bool_arrays(self): # offset for simd tests - self.t = np.array([True] * 41, dtype=bool)[1::] - self.f = np.array([False] * 41, dtype=bool)[1::] - self.o = np.array([False] * 42, dtype=bool)[2::] - self.nm = self.f.copy() - self.im = self.t.copy() - self.nm[3] = True - self.nm[-2] = True - self.im[3] = False - self.im[-2] = False + t = np.array([True] * 41, dtype=bool)[1::] + f = np.array([False] * 41, dtype=bool)[1::] + o = np.array([False] * 42, dtype=bool)[2::] + nm = f.copy() + im = t.copy() + nm[3] = True + nm[-2] = True + im[3] = False + im[-2] = False + return t, f, o, nm, im def test_all_any(self): - assert_(self.t.all()) - assert_(self.t.any()) - assert_(not self.f.all()) - assert_(not self.f.any()) - assert_(self.nm.any()) - assert_(self.im.any()) - assert_(not self.nm.all()) - assert_(not self.im.all()) + t, f, _, nm, im = self._create_bool_arrays() + assert_(t.all()) + assert_(t.any()) + assert_(not f.all()) + assert_(not f.any()) + assert_(nm.any()) + assert_(im.any()) + assert_(not nm.all()) + assert_(not im.all()) # check bad element in all positions for i in range(256 - 7): d = np.array([False] * 256, dtype=bool)[7::] @@ -777,118 +782,103 @@ def test_all_any(self): assert_(not np.all(e), msg=f"{i!r}") def test_logical_not_abs(self): - assert_array_equal(~self.t, self.f) - assert_array_equal(np.abs(~self.t), self.f) - assert_array_equal(np.abs(~self.f), self.t) - assert_array_equal(np.abs(self.f), self.f) - assert_array_equal(~np.abs(self.f), self.t) - assert_array_equal(~np.abs(self.t), self.f) - assert_array_equal(np.abs(~self.nm), self.im) - np.logical_not(self.t, out=self.o) - assert_array_equal(self.o, self.f) - np.abs(self.t, out=self.o) - assert_array_equal(self.o, self.t) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(~t, f) + assert_array_equal(np.abs(~t), f) + assert_array_equal(np.abs(~f), t) + assert_array_equal(np.abs(f), f) + assert_array_equal(~np.abs(f), t) + assert_array_equal(~np.abs(t), f) + assert_array_equal(np.abs(~nm), im) + np.logical_not(t, out=o) + assert_array_equal(o, f) + np.abs(t, out=o) + assert_array_equal(o, t) def test_logical_and_or_xor(self): - assert_array_equal(self.t | self.t, self.t) - assert_array_equal(self.f | self.f, self.f) - assert_array_equal(self.t | self.f, self.t) - assert_array_equal(self.f | self.t, self.t) - np.logical_or(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t & self.t, self.t) - assert_array_equal(self.f & self.f, self.f) - assert_array_equal(self.t & self.f, self.f) - 
assert_array_equal(self.f & self.t, self.f) - np.logical_and(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t ^ self.t, self.f) - assert_array_equal(self.f ^ self.f, self.f) - assert_array_equal(self.t ^ self.f, self.t) - assert_array_equal(self.f ^ self.t, self.t) - np.logical_xor(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.f) - - assert_array_equal(self.nm & self.t, self.nm) - assert_array_equal(self.im & self.f, False) - assert_array_equal(self.nm & True, self.nm) - assert_array_equal(self.im & False, self.f) - assert_array_equal(self.nm | self.t, self.t) - assert_array_equal(self.im | self.f, self.im) - assert_array_equal(self.nm | True, self.t) - assert_array_equal(self.im | False, self.im) - assert_array_equal(self.nm ^ self.t, self.im) - assert_array_equal(self.im ^ self.f, self.im) - assert_array_equal(self.nm ^ True, self.im) - assert_array_equal(self.im ^ False, self.im) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(t | t, t) + assert_array_equal(f | f, f) + assert_array_equal(t | f, t) + assert_array_equal(f | t, t) + np.logical_or(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t & t, t) + assert_array_equal(f & f, f) + assert_array_equal(t & f, f) + assert_array_equal(f & t, f) + np.logical_and(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t ^ t, f) + assert_array_equal(f ^ f, f) + assert_array_equal(t ^ f, t) + assert_array_equal(f ^ t, t) + np.logical_xor(t, t, out=o) + assert_array_equal(o, f) + + assert_array_equal(nm & t, nm) + assert_array_equal(im & f, False) + assert_array_equal(nm & True, nm) + assert_array_equal(im & False, f) + assert_array_equal(nm | t, t) + assert_array_equal(im | f, im) + assert_array_equal(nm | True, t) + assert_array_equal(im | False, im) + assert_array_equal(nm ^ t, im) + assert_array_equal(im ^ f, im) + assert_array_equal(nm ^ True, im) + assert_array_equal(im ^ False, im) class TestBoolCmp: - def setup_method(self): - self.f = np.ones(256, dtype=np.float32) - self.ef = np.ones(self.f.size, dtype=bool) - self.d = np.ones(128, dtype=np.float64) - self.ed = np.ones(self.d.size, dtype=bool) + def _create_data(self, dtype, size): + # generate data using given dtype and num for size of array + a = np.ones(size, dtype=dtype) + e = np.ones(a.size, dtype=bool) # generate values for all permutation of 256bit simd vectors s = 0 - for i in range(32): - self.f[s:s + 8] = [i & 2**x for x in range(8)] - self.ef[s:s + 8] = [(i & 2**x) != 0 for x in range(8)] - s += 8 - s = 0 - for i in range(16): - self.d[s:s + 4] = [i & 2**x for x in range(4)] - self.ed[s:s + 4] = [(i & 2**x) != 0 for x in range(4)] - s += 4 - - self.nf = self.f.copy() - self.nd = self.d.copy() - self.nf[self.ef] = np.nan - self.nd[self.ed] = np.nan - - self.inff = self.f.copy() - self.infd = self.d.copy() - self.inff[::3][self.ef[::3]] = np.inf - self.infd[::3][self.ed[::3]] = np.inf - self.inff[1::3][self.ef[1::3]] = -np.inf - self.infd[1::3][self.ed[1::3]] = -np.inf - self.inff[2::3][self.ef[2::3]] = np.nan - self.infd[2::3][self.ed[2::3]] = np.nan - self.efnonan = self.ef.copy() - self.efnonan[2::3] = False - self.ednonan = self.ed.copy() - self.ednonan[2::3] = False - - self.signf = self.f.copy() - self.signd = self.d.copy() - self.signf[self.ef] *= -1. - self.signd[self.ed] *= -1. 
- self.signf[1::6][self.ef[1::6]] = -np.inf - self.signd[1::6][self.ed[1::6]] = -np.inf + r = int(size / 32) + for i in range(int(size / 8)): + a[s:s + r] = [i & 2**x for x in range(r)] + e[s:s + r] = [(i & 2**x) != 0 for x in range(r)] + s += r + n = a.copy() + n[e] = np.nan + + inf = a.copy() + inf[::3][e[::3]] = np.inf + inf[1::3][e[1::3]] = -np.inf + inf[2::3][e[2::3]] = np.nan + enonan = e.copy() + enonan[2::3] = False + + sign = a.copy() + sign[e] *= -1. + sign[1::6][e[1::6]] = -np.inf # On RISC-V, many operations that produce NaNs, such as converting # a -NaN from f64 to f32, return a canonical NaN. The canonical # NaNs are always positive. See section 11.3 NaN Generation and # Propagation of the RISC-V Unprivileged ISA for more details. # We disable the float32 sign test on riscv64 for -np.nan as the sign # of the NaN will be lost when it's converted to a float32. - if platform.machine() != 'riscv64': - self.signf[3::6][self.ef[3::6]] = -np.nan - self.signd[3::6][self.ed[3::6]] = -np.nan - self.signf[4::6][self.ef[4::6]] = -0. - self.signd[4::6][self.ed[4::6]] = -0. + if not (dtype == np.float32 and platform.machine() == 'riscv64'): + sign[3::6][e[3::6]] = -np.nan + sign[4::6][e[4::6]] = -0. + return a, e, n, inf, enonan, sign def test_float(self): # offset for alignment test + f, ef, nf, inff, efnonan, signf = self._create_data(np.float32, 256) for i in range(4): - assert_array_equal(self.f[i:] > 0, self.ef[i:]) - assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) - assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) - assert_array_equal(-self.f[i:] < 0, self.ef[i:]) - assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) - r = self.f[i:] != 0 - assert_array_equal(r, self.ef[i:]) - r2 = self.f[i:] != np.zeros_like(self.f[i:]) - r3 = 0 != self.f[i:] + assert_array_equal(f[i:] > 0, ef[i:]) + assert_array_equal(f[i:] - 1 >= 0, ef[i:]) + assert_array_equal(f[i:] == 0, ~ef[i:]) + assert_array_equal(-f[i:] < 0, ef[i:]) + assert_array_equal(-f[i:] + 1 <= 0, ef[i:]) + r = f[i:] != 0 + assert_array_equal(r, ef[i:]) + r2 = f[i:] != np.zeros_like(f[i:]) + r3 = 0 != f[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -897,24 +887,25 @@ def test_float(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) - assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) - assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) - assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) - assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) + assert_array_equal(np.isnan(nf[i:]), ef[i:]) + assert_array_equal(np.isfinite(nf[i:]), ~ef[i:]) + assert_array_equal(np.isfinite(inff[i:]), ~ef[i:]) + assert_array_equal(np.isinf(inff[i:]), efnonan[i:]) + assert_array_equal(np.signbit(signf[i:]), ef[i:]) def test_double(self): # offset for alignment test + d, ed, nd, infd, ednonan, signd = self._create_data(np.float64, 128) for i in range(2): - assert_array_equal(self.d[i:] > 0, self.ed[i:]) - assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) - assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) - assert_array_equal(-self.d[i:] < 0, self.ed[i:]) - assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) - r = self.d[i:] != 0 - assert_array_equal(r, self.ed[i:]) - r2 = self.d[i:] != np.zeros_like(self.d[i:]) - r3 = 0 != self.d[i:] + assert_array_equal(d[i:] > 0, ed[i:]) + assert_array_equal(d[i:] - 1 >= 0, ed[i:]) + assert_array_equal(d[i:] == 0, ~ed[i:]) + 
assert_array_equal(-d[i:] < 0, ed[i:]) + assert_array_equal(-d[i:] + 1 <= 0, ed[i:]) + r = d[i:] != 0 + assert_array_equal(r, ed[i:]) + r2 = d[i:] != np.zeros_like(d[i:]) + r3 = 0 != d[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -923,11 +914,11 @@ def test_double(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) - assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) - assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) - assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) - assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) + assert_array_equal(np.isnan(nd[i:]), ed[i:]) + assert_array_equal(np.isfinite(nd[i:]), ~ed[i:]) + assert_array_equal(np.isfinite(infd[i:]), ~ed[i:]) + assert_array_equal(np.isinf(infd[i:]), ednonan[i:]) + assert_array_equal(np.signbit(signd[i:]), ed[i:]) class TestSeterr: @@ -1002,7 +993,7 @@ def test_floating_exceptions(self, typecode): if np.dtype(ftype).kind == 'f': # Get some extreme values for the type fi = np.finfo(ftype) - ft_tiny = fi._machar.tiny + ft_tiny = fi.tiny ft_max = fi.max ft_eps = fi.eps underflow = 'underflow' @@ -1011,7 +1002,7 @@ def test_floating_exceptions(self, typecode): # 'c', complex, corresponding real dtype rtype = type(ftype(0).real) fi = np.finfo(rtype) - ft_tiny = ftype(fi._machar.tiny) + ft_tiny = ftype(fi.tiny) ft_max = ftype(fi.max) ft_eps = ftype(fi.eps) # The complex types raise different exceptions @@ -1035,8 +1026,11 @@ def test_floating_exceptions(self, typecode): lambda a, b: a + b, ft_max, ft_max * ft_eps) self.assert_raises_fpe(overflow, lambda a, b: a - b, -ft_max, ft_max * ft_eps) - self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) + # On AIX, pow() with double does not raise the overflow exception, + # it returns inf. Long double is the same as double. 
+ if sys.platform != 'aix' or typecode not in 'dDgG': + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) self.assert_raises_fpe(divbyzero, lambda a, b: a / b, ftype(1), ftype(0)) self.assert_raises_fpe( @@ -1636,6 +1630,7 @@ def test_failed_itemsetting(self): with pytest.raises(ValueError): np.fromiter(iterable, dtype=np.dtype((int, 2))) + class TestNonzero: def test_nonzero_trivial(self): assert_equal(np.count_nonzero(np.array([])), 0) @@ -1665,8 +1660,10 @@ def test_nonzero_onedim(self): # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], # dtype=[('a', 'i4'), ('b', 'i2')]) - x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], - dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) + x = np.array( + [(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')] + ) assert_equal(np.count_nonzero(x['a']), 3) assert_equal(np.count_nonzero(x['b']), 4) assert_equal(np.count_nonzero(x['c']), 3) @@ -2000,7 +1997,9 @@ def test_boolean(self): g1 = randint(0, 5, size=15) g2 = randint(0, 8, size=15) V[g1, g2] = -V[g1, g2] - assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + assert_( + (np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all() + ) def test_boolean_edgecase(self): a = np.array([], dtype='int32') @@ -2266,7 +2265,10 @@ def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) assert_(not res) assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + res = np.array_equiv( + np.array([1, 2]), + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), + ) assert_(not res) assert_(type(res) is bool) @@ -2305,9 +2307,8 @@ def assert_array_strict_equal(x, y): class TestClip: - def setup_method(self): - self.nr = 5 - self.nc = 3 + nr = 5 + nc = 3 def fastclip(self, a, m, M, out=None, **kwargs): return a.clip(m, M, out=out, **kwargs) @@ -2837,16 +2838,12 @@ def test_clip_scalar_nan_propagation(self, arr, amin, amax): actual = np.clip(arr, amin, amax) assert_equal(actual, expected) - @pytest.mark.xfail(reason="propagation doesn't match spec") @pytest.mark.parametrize("arr, amin, amax", [ (np.array([1] * 10, dtype='m8'), np.timedelta64('NaT'), np.zeros(10, dtype=np.int32)), ]) - @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_NaT_propagation(self, arr, amin, amax): - # NOTE: the expected function spec doesn't - # propagate NaT, but clip() now does expected = np.minimum(np.maximum(arr, amin), amax) actual = np.clip(arr, amin, amax) assert_equal(actual, expected) @@ -2940,6 +2937,7 @@ def test_out_of_bound_pyints(self, dtype, min, max): if max is not None: assert (c <= max).all() + class TestAllclose: rtol = 1e-5 atol = 1e-8 @@ -3125,7 +3123,9 @@ def tst_isclose_allclose(self, x, y): if np.isscalar(x) and np.isscalar(y): assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) else: - assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + assert_array_equal( + np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y) + ) def test_ip_all_isclose(self): self._setup() @@ -3219,7 +3219,8 @@ def test_tol_warnings(self): for i in b: for j in b: - # Making sure that i and j are not both numbers, because that won't create a warning + # Making sure that i and j are not both numbers, + # because that won't create a warning if (i == 1) and (j == 1): continue @@ -3229,49 +3230,55 @@ def 
test_tol_warnings(self): c = np.isclose(a, a, atol=i, rtol=j) assert len(w) == 1 assert issubclass(w[-1].category, RuntimeWarning) - assert f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" in str(w[-1].message) + expected = f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" + assert expected in str(w[-1].message) class TestStdVar: - def setup_method(self): - self.A = np.array([1, -1, 1, -1]) - self.real_var = 1 + def _create_data(self): + A = np.array([1, -1, 1, -1]) + real_var = 1 + return A, real_var def test_basic(self): - assert_almost_equal(np.var(self.A), self.real_var) - assert_almost_equal(np.std(self.A)**2, self.real_var) + A, real_var = self._create_data() + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) def test_scalars(self): assert_equal(np.var(1), 0) assert_equal(np.std(1), 0) def test_ddof1(self): - assert_almost_equal(np.var(self.A, ddof=1), - self.real_var * len(self.A) / (len(self.A) - 1)) - assert_almost_equal(np.std(self.A, ddof=1)**2, - self.real_var * len(self.A) / (len(self.A) - 1)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=1), + real_var * len(A) / (len(A) - 1)) + assert_almost_equal(np.std(A, ddof=1)**2, + real_var * len(A) / (len(A) - 1)) def test_ddof2(self): - assert_almost_equal(np.var(self.A, ddof=2), - self.real_var * len(self.A) / (len(self.A) - 2)) - assert_almost_equal(np.std(self.A, ddof=2)**2, - self.real_var * len(self.A) / (len(self.A) - 2)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=2), + real_var * len(A) / (len(A) - 2)) + assert_almost_equal(np.std(A, ddof=2)**2, + real_var * len(A) / (len(A) - 2)) def test_correction(self): + A, _ = self._create_data() assert_almost_equal( - np.var(self.A, correction=1), np.var(self.A, ddof=1) + np.var(A, correction=1), np.var(A, ddof=1) ) assert_almost_equal( - np.std(self.A, correction=1), np.std(self.A, ddof=1) + np.std(A, correction=1), np.std(A, ddof=1) ) err_msg = "ddof and correction can't be provided simultaneously." with assert_raises_regex(ValueError, err_msg): - np.var(self.A, ddof=1, correction=0) + np.var(A, ddof=1, correction=0) with assert_raises_regex(ValueError, err_msg): - np.std(self.A, ddof=1, correction=1) + np.std(A, ddof=1, correction=1) def test_out_scalar(self): d = np.arange(10) @@ -3300,26 +3307,22 @@ def test_scalars(self): class TestCreationFuncs: - # Test ones, zeros, empty and full. 
- - def setup_method(self): - dtypes = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} - # void, bytes, str - variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} + def check_function(self, func, fill_value=None): + dtypes_info = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} keyfunc = lambda dtype: dtype.str - self.dtypes = sorted(dtypes - variable_sized | + variable_sized = {tp for tp in dtypes_info if tp.str.endswith('0')} + dtypes = sorted(dtypes_info - variable_sized | {np.dtype(tp.str.replace("0", str(i))) for tp in variable_sized for i in range(1, 10)}, key=keyfunc) - self.dtypes += [type(dt) for dt in sorted(dtypes, key=keyfunc)] - self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} - self.ndims = 10 + dtypes += [type(dt) for dt in sorted(dtypes_info, key=keyfunc)] + orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + ndims = 10 - def check_function(self, func, fill_value=None): par = ((0, 1, 2), - range(self.ndims), - self.orders, - self.dtypes) + range(ndims), + orders, + dtypes) fill_kwarg = {} if fill_value is not None: fill_kwarg = {'fill_value': fill_value} @@ -3345,7 +3348,7 @@ def check_function(self, func, fill_value=None): assert_equal(arr.dtype, np.dtype(dtype_str)) else: assert_equal(arr.dtype, np.dtype(dtype.type)) - assert_(getattr(arr.flags, self.orders[order])) + assert_(getattr(arr.flags, orders[order])) if fill_value is not None: if arr.dtype.str.startswith('|S'): @@ -3381,36 +3384,39 @@ def test_for_reference_leak(self): np.full([dim] * 10, 0) assert_(sys.getrefcount(dim) == beg) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize("func", [np.empty, np.zeros, np.ones, np.full]) + def test_signatures(self, func): + sig = inspect.signature(func) + params = sig.parameters + + assert len(params) in {5, 6} + + assert 'shape' in params + assert params["shape"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["shape"].default is inspect.Parameter.empty + + assert 'dtype' in params + assert params["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["dtype"].default is None + + assert 'order' in params + assert params["order"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["order"].default == "C" + + assert 'device' in params + assert params["device"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["device"].default is None + + assert 'like' in params + assert params["like"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["like"].default is None + class TestLikeFuncs: '''Test ones_like, zeros_like, empty_like and full_like''' - def setup_method(self): - self.data = [ - # Array scalars - (np.array(3.), None), - (np.array(3), 'f8'), - # 1D arrays - (np.arange(6, dtype='f4'), None), - (np.arange(6), 'c16'), - # 2D C-layout arrays - (np.arange(6).reshape(2, 3), None), - (np.arange(6).reshape(3, 2), 'i1'), - # 2D F-layout arrays - (np.arange(6).reshape((2, 3), order='F'), None), - (np.arange(6).reshape((3, 2), order='F'), 'i1'), - # 3D C-layout arrays - (np.arange(24).reshape(2, 3, 4), None), - (np.arange(24).reshape(4, 3, 2), 'f4'), - # 3D F-layout arrays - (np.arange(24).reshape((2, 3, 4), order='F'), None), - (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), - # 3D non-C/F-layout arrays - (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), - (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), - ] - self.shapes = [(), (5,), (5, 6,), (5, 6, 
7,)] - def compare_array_value(self, dz, value, fill_value): if value is not None: if fill_value: @@ -3423,11 +3429,36 @@ def compare_array_value(self, dz, value, fill_value): assert_(np.all(dz == value)) def check_like_function(self, like_function, value, fill_value=False): + data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + shapes = [(), (5,), (5, 6,), (5, 6, 7,)] + if fill_value: fill_kwarg = {'fill_value': value} else: fill_kwarg = {} - for d, dtype in self.data: + for d, dtype in data: # default (K) order, dtype dz = like_function(d, dtype=dtype, **fill_kwarg) assert_equal(dz.shape, d.shape) @@ -3475,7 +3506,7 @@ def check_like_function(self, like_function, value, fill_value=False): self.compare_array_value(dz, value, fill_value) # Test the 'shape' parameter - for s in self.shapes: + for s in shapes: for o in 'CFA': sz = like_function(d, dtype=dtype, shape=s, order=o, **fill_kwarg) @@ -3655,6 +3686,16 @@ def test_mode(self): with assert_raises(TypeError): np.convolve(d, k, mode=None) + def test_convolve_empty_input_error_message(self): + """ + Test that convolve raises the correct error message when inputs are empty. + Regression test for gh-30272 (variable swapping bug). 
+        """
+        with pytest.raises(ValueError, match="a cannot be empty"):
+            np.convolve(np.array([]), np.array([1, 2]))
+
+        with pytest.raises(ValueError, match="v cannot be empty"):
+            np.convolve(np.array([1, 2]), np.array([]))
 
 
 class TestArgwhere:
@@ -4185,6 +4226,19 @@ def test_shape_mismatch_error_message(self):
                              r"arg 2 with shape \(2,\)"):
             np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7])
 
+    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+    @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
+    def test_signatures(self):
+        sig_new = inspect.signature(np.broadcast)
+        assert len(sig_new.parameters) == 1
+        assert "arrays" in sig_new.parameters
+        assert sig_new.parameters["arrays"].kind == inspect.Parameter.VAR_POSITIONAL
+
+        sig_reset = inspect.signature(np.broadcast.reset)
+        assert len(sig_reset.parameters) == 1
+        assert "self" in sig_reset.parameters
+        assert sig_reset.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY
+
 
 class TestKeepdims:
 
@@ -4211,7 +4265,8 @@ def test_zero_dimension(self):
     def test_zero_dimensional(self):
         # gh-12130
         arr_0d = np.array(1)
-        ret = np.tensordot(arr_0d, arr_0d, ([], []))  # contracting no axes is well defined
+        # contracting no axes is well defined
+        ret = np.tensordot(arr_0d, arr_0d, ([], []))
         assert_array_equal(ret, arr_0d)
 
 
diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py
index c9a2ac06472c..5763a964c41d 100644
--- a/numpy/_core/tests/test_numerictypes.py
+++ b/numpy/_core/tests/test_numerictypes.py
@@ -347,17 +347,16 @@ def test_assign(self):
 
 
 class TestMultipleFields:
-    def setup_method(self):
-        self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
-
     def _bad_call(self):
-        return self.ary['f0', 'f1']
+        ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
+        return ary['f0', 'f1']
 
     def test_no_tuple(self):
         assert_raises(IndexError, self._bad_call)
 
     def test_return(self):
-        res = self.ary[['f0', 'f2']].tolist()
+        ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
+        res = ary[['f0', 'f2']].tolist()
         assert_(res == [(1, 3), (5, 7)])
 
 
@@ -617,6 +616,35 @@ def test_names_are_undersood_by_dtype(self, t):
         assert np.dtype(t.__name__).type is t
 
 
+class TestScalarTypeOrder:
+    @pytest.mark.parametrize(('a', 'b'), [
+        # signedinteger
+        (np.byte, np.short),
+        (np.short, np.intc),
+        (np.intc, np.long),
+        (np.long, np.longlong),
+        # unsignedinteger
+        (np.ubyte, np.ushort),
+        (np.ushort, np.uintc),
+        (np.uintc, np.ulong),
+        (np.ulong, np.ulonglong),
+        # floating
+        (np.half, np.single),
+        (np.single, np.double),
+        (np.double, np.longdouble),
+        # complexfloating
+        (np.csingle, np.cdouble),
+        (np.cdouble, np.clongdouble),
+        # flexible
+        (np.bytes_, np.str_),
+        (np.str_, np.void),
+        # temporal
+        (np.datetime64, np.timedelta64),
+    ])
+    def test_stable_ordering(self, a: type[np.generic], b: type[np.generic]):
+        assert np.ScalarType.index(a) <= np.ScalarType.index(b)
+
+
 class TestBoolDefinition:
     def test_bool_definition(self):
         assert nt.bool is np.bool
diff --git a/numpy/_core/tests/test_overrides.py b/numpy/_core/tests/test_overrides.py
index b0d73375ed10..ebcf2f0ce112 100644
--- a/numpy/_core/tests/test_overrides.py
+++ b/numpy/_core/tests/test_overrides.py
@@ -550,7 +550,7 @@ def __array_function__(self, func, types, args, kwargs):
 
 
 class TestArrayLike:
-    def setup_method(self):
+    def _create_MyArray(self):
         class MyArray:
             def __init__(self, function=None):
                 self.function = function
@@ -563,20 +563,22 @@ def __array_function__(self, func, types, args, 
kwargs): return NotImplemented return my_func(*args, **kwargs) - self.MyArray = MyArray + return MyArray + def _create_MyNoArrayFunctionArray(self): class MyNoArrayFunctionArray: def __init__(self, function=None): self.function = function - self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + return MyNoArrayFunctionArray + def _create_MySubclass(self): class MySubclass(np.ndarray): def __array_function__(self, func, types, args, kwargs): result = super().__array_function__(func, types, args, kwargs) return result.view(self.__class__) - self.MySubclass = MySubclass + return MySubclass def add_method(self, name, arr_class, enable_value_error=False): def _definition(*args, **kwargs): @@ -593,9 +595,10 @@ def func_args(*args, **kwargs): return args, kwargs def test_array_like_not_implemented(self): - self.add_method('array', self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) - ref = self.MyArray.array() + ref = MyArray.array() with assert_raises_regex(TypeError, 'no implementation found'): array_like = np.asarray(1, like=ref) @@ -646,15 +649,16 @@ def test_nep35_functions_as_array_functions(self,): @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like(self, function, args, kwargs, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) - my_func = getattr(self.MyArray, function) + my_func = getattr(MyArray, function) if numpy_ref is True: ref = np.array(1) else: - ref = self.MyArray.array() + ref = MyArray.array() like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) @@ -672,19 +676,20 @@ def test_array_like(self, function, args, kwargs, numpy_ref): assert_equal(array_like, np_arr) else: - assert type(array_like) is self.MyArray + assert type(array_like) is MyArray assert array_like.function is my_func @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) def test_no_array_function_like(self, function, args, kwargs, ref): - self.add_method('array', self.MyNoArrayFunctionArray) - self.add_method(function, self.MyNoArrayFunctionArray) + MyNoArrayFunctionArray = self._create_MyNoArrayFunctionArray() + self.add_method('array', MyNoArrayFunctionArray) + self.add_method(function, MyNoArrayFunctionArray) np_func = getattr(np, function) # Instantiate ref if it's the MyNoArrayFunctionArray class if ref == "MyNoArrayFunctionArray": - ref = self.MyNoArrayFunctionArray.array() + ref = MyNoArrayFunctionArray.array() like_args = tuple(a() if callable(a) else a for a in args) @@ -694,11 +699,12 @@ def test_no_array_function_like(self, function, args, kwargs, ref): @pytest.mark.parametrize('function, args, kwargs', _array_tests) def test_subclass(self, function, args, kwargs): - ref = np.array(1).view(self.MySubclass) + MySubclass = self._create_MySubclass() + ref = np.array(1).view(MySubclass) np_func = getattr(np, function) like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) - assert type(array_like) is self.MySubclass + assert type(array_like) is MySubclass if np_func is np.empty: return np_args = tuple(a() if callable(a) else a for a in args) @@ -707,13 +713,14 @@ def test_subclass(self, function, args, kwargs): 
@pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like_fromfile(self, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method("fromfile", self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method("fromfile", MyArray) if numpy_ref is True: ref = np.array(1) else: - ref = self.MyArray.array() + ref = MyArray.array() data = np.random.random(5) @@ -728,13 +735,14 @@ def test_array_like_fromfile(self, numpy_ref): assert_equal(np_res, data) assert_equal(array_like, np_res) else: - assert type(array_like) is self.MyArray - assert array_like.function is self.MyArray.fromfile + assert type(array_like) is MyArray + assert array_like.function is MyArray.fromfile def test_exception_handling(self): - self.add_method('array', self.MyArray, enable_value_error=True) + MyArray = self._create_MyArray() + self.add_method('array', MyArray, enable_value_error=True) - ref = self.MyArray.array() + ref = MyArray.array() with assert_raises(TypeError): # Raises the error about `value_error` being invalid first @@ -742,8 +750,9 @@ def test_exception_handling(self): @pytest.mark.parametrize('function, args, kwargs', _array_tests) def test_like_as_none(self, function, args, kwargs): - self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) like_args = tuple(a() if callable(a) else a for a in args) diff --git a/numpy/_core/tests/test_print.py b/numpy/_core/tests/test_print.py index d99b2794d7ca..95a177b57a7d 100644 --- a/numpy/_core/tests/test_print.py +++ b/numpy/_core/tests/test_print.py @@ -117,6 +117,7 @@ def _test_redirected_print(x, tp, ref=None): err_msg=f'print failed for type{tp}') +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) def test_float_type_print(tp): """Check formatting when using print """ @@ -133,6 +134,7 @@ def test_float_type_print(tp): _test_redirected_print(1e16, tp, ref) +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) def test_complex_type_print(tp): """Check formatting when using print """ diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index b4b93aee4026..7ed6ea7687ff 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -359,26 +359,26 @@ def test_tofile_fromfile(self): class TestRecord: - def setup_method(self): - self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], + def _create_data(self): + return np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], dtype=[("col1", " int32 max diff --git a/numpy/_core/tests/test_scalar_ctors.py b/numpy/_core/tests/test_scalar_ctors.py index be3ef0459c82..05ede01b5973 100644 --- a/numpy/_core/tests/test_scalar_ctors.py +++ b/numpy/_core/tests/test_scalar_ctors.py @@ -4,11 +4,7 @@ import pytest import numpy as np -from numpy.testing import ( - assert_almost_equal, - assert_equal, - assert_warns, -) +from numpy.testing import assert_almost_equal, assert_equal class TestFromString: @@ -29,7 +25,7 @@ def test_floating_overflow(self): assert_equal(fsingle, np.inf) fdouble = np.double('1e10000') assert_equal(fdouble, np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '1e10000') 
assert_equal(flongdouble, np.inf) fhalf = np.half('-1e10000') @@ -38,7 +34,7 @@ def test_floating_overflow(self): assert_equal(fsingle, -np.inf) fdouble = np.double('-1e10000') assert_equal(fdouble, -np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '-1e10000') assert_equal(flongdouble, -np.inf) diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 2d508a08bb4d..b993a8f3df29 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -2,15 +2,17 @@ Test the scalar constructors, which also do type-coercion """ import fractions +import inspect import platform +import sys import types -from typing import Any +from typing import Any, Literal import pytest import numpy as np from numpy._core import sctypes -from numpy.testing import assert_equal, assert_raises +from numpy.testing import IS_PYPY, assert_equal, assert_raises class TestAsIntegerRatio: @@ -171,8 +173,12 @@ def test_abc_non_numeric(self, cls: type[np.generic]) -> None: @pytest.mark.parametrize("code", np.typecodes["All"]) def test_concrete(self, code: str) -> None: cls = np.dtype(code).type - with pytest.raises(TypeError): - cls[Any] + if cls in {np.bool, np.datetime64}: + # these are intentionally subscriptable + assert cls[Any] + else: + with pytest.raises(TypeError): + cls[Any] @pytest.mark.parametrize("arg_len", range(4)) def test_subscript_tuple(self, arg_len: int) -> None: @@ -186,6 +192,10 @@ def test_subscript_tuple(self, arg_len: int) -> None: def test_subscript_scalar(self) -> None: assert np.number[Any] + @pytest.mark.parametrize("subscript", [Literal[True], Literal[False]]) + def test_subscript_bool(self, subscript: Literal[True, False]) -> None: + assert isinstance(np.bool[subscript], types.GenericAlias) + class TestBitCount: # derived in part from the cpython test "test_bit_count" @@ -244,3 +254,75 @@ def test_array_wrap(scalar): arr1d = np.array([3], dtype=np.int8) assert scalar.__array_wrap__(arr1d) is arr1d assert scalar.__array_wrap__(arr1d, None, True) is arr1d + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestSignature: + # test that scalar types have a valid __text_signature__ or __signature__ set + @pytest.mark.parametrize( + "sctype", + [ + *sctypes["int"], + *sctypes["uint"], + *sctypes["float"], + *sctypes["complex"], + *sctypes["others"], + np.datetime64, + np.timedelta64, + ], + ) + def test_constructor_signatures(self, sctype: type[np.generic]): + try: + sig = inspect.signature(sctype) + except ValueError: + pytest.fail(f"missing signature: {sctype}") + + assert sig.parameters + + @pytest.mark.parametrize( + "sctype", + [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], + ) + def test_method_signatures_is_integer(self, sctype: type[np.integer | np.floating]): + try: + sig = inspect.signature(sctype.is_integer) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.is_integer") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize("sctype", sctypes["float"]) + def test_method_signatures_as_integer_ratio(self, sctype: type[np.floating]): + try: + sig = inspect.signature(sctype.as_integer_ratio) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") + + assert 
len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize( + "method_name", + [ + "__array_namespace__", "__copy__", "__deepcopy__", "all", "any", "argmax", + "argmin", "argsort", "astype", "byteswap", "choose", "clip", "compress", + "conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dump", + "dumps", "fill", "flatten", "getfield", "item", "max", "mean", "min", + "nonzero", "prod", "put", "ravel", "repeat", "reshape", "resize", "round", + "searchsorted", "setfield", "setflags", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "to_device", "tobytes", "tofile", "tolist", "trace", + "transpose", "var", "view", + ], + ) + def test_array_scalar_method_signatures(self, method_name: str): + # methods shared by np.generic and np.ndarray should have the same signature + fn_generic = getattr(np.generic, method_name) + sig_generic = inspect.signature(fn_generic) + assert "self" in sig_generic.parameters + assert sig_generic.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + fn_ndarray = getattr(np.ndarray, method_name) + sig_ndarray = inspect.signature(fn_ndarray) + assert sig_generic == sig_ndarray diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index 4d6b5bdd73fc..4d2744b85e53 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -2,10 +2,10 @@ Test scalar buffer interface adheres to PEP 3118 """ import pytest -from numpy._core._multiarray_tests import get_buffer_info -from numpy._core._rational_tests import rational import numpy as np +from numpy._core._multiarray_tests import get_buffer_info +from numpy._core._rational_tests import rational from numpy.testing import assert_, assert_equal, assert_raises # PEP3118 format strings for native (standard alignment and byteorder) types @@ -128,8 +128,8 @@ def test_str_ucs4(self, s): s = np.str_(s) # only our subclass implements the buffer protocol # all the same, characters always encode as ucs4 - expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), 'format': '2w', - 'readonly': True} + expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), + 'format': '2w', 'readonly': True} v = memoryview(s) assert self._as_dict(v) == expected diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index fc37897bb7f7..bfbc9a54cbfe 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -9,9 +9,9 @@ from hypothesis import given, settings from hypothesis.extra import numpy as hynp from hypothesis.strategies import sampled_from -from numpy._core._rational_tests import rational import numpy as np +from numpy._core._rational_tests import rational from numpy._utils import _pep440 from numpy.exceptions import ComplexWarning from numpy.testing import ( @@ -23,7 +23,6 @@ assert_equal, assert_raises, check_support_sve, - suppress_warnings, ) types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, @@ -369,12 +368,7 @@ def test_float_modulus_corner_cases(self): assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") - sup.filter(RuntimeWarning, "invalid value 
encountered in divmod") + with warnings.catch_warnings(), np.errstate(all='ignore'): for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) @@ -522,21 +516,17 @@ def test_int_from_infinite_longdouble(self): # gh-627 x = np.longdouble(np.inf) assert_raises(OverflowError, int, x) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, int, x) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") def test_int_from_infinite_longdouble___int__(self): x = np.longdouble(np.inf) assert_raises(OverflowError, x.__int__) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, x.__int__) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), reason="long double is same as double") @@ -731,8 +721,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] if dt in np.typecodes['UnsignedInteger']: @@ -749,8 +739,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] assert_equal(operator.sub(a, a), 0) @@ -771,8 +761,8 @@ def _test_abs_func(self, absfunc, test_dtype): x = test_dtype(np.finfo(test_dtype).max) assert_equal(absfunc(x), x.real) - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) x = test_dtype(np.finfo(test_dtype).tiny) assert_equal(absfunc(x), x.real) @@ -881,6 +871,7 @@ def recursionlimit(n): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") def test_operator_object_left(o, op, type_): try: with recursionlimit(200): @@ -892,6 +883,7 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") def test_operator_object_right(o, op, type_): try: with recursionlimit(200): diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index f7b944be08b7..5be3d05bbf11 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -1,3 +1,5 @@ +import sys + import pytest import numpy as np @@ -29,6 +31,7 @@ assert_raises, assert_raises_regex, ) +from numpy.testing._private.utils import requires_memory class TestAtleast1d: @@ -290,6 +293,22 @@ def test_exceptions(self): # No arrays to concatenate raises ValueError assert_raises(ValueError, concatenate, ()) + @pytest.mark.slow + @pytest.mark.skipif( + sys.maxsize < 2**32, + reason="only problematic on 64bit platforms" + ) + @requires_memory(2 * np.iinfo(np.intc).max) + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_huge_list_error(self): + a = 
np.array([1]) + max_int = np.iinfo(np.intc).max + arrs = (a,) * (max_int + 1) + msg = (fr"concatenate\(\) only supports up to {max_int} arrays" + f" but got {max_int + 1}.") + with pytest.raises(ValueError, match=msg): + np.concatenate(arrs) + def test_concatenate_axis_None(self): a = np.arange(4, dtype=np.float64).reshape((2, 2)) b = list(range(3)) @@ -365,7 +384,15 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) - @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython") + def test_concatenate_same_value(self): + r4 = list(range(4)) + with pytest.raises(ValueError, match="^casting must be one of"): + concatenate([r4, r4], casting="same_value") + + @pytest.mark.skipif( + IS_PYPY, + reason="PYPY handles sq_concat, nb_add differently than cpython" + ) def test_operator_concat(self): import operator a = array([1, 2]) diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index 697d89bcc26c..335abc98c84e 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -6,8 +6,8 @@ import re import pytest -from numpy._core._multiarray_umath import __cpu_baseline__ +from numpy._core._multiarray_umath import __cpu_baseline__ from numpy._core._simd import clear_floatstatus, get_floatstatus, targets @@ -271,7 +271,8 @@ def test_operators_shift(self): shr = self.shr(vdata_a, count) assert shr == data_shr_a - # shift by zero or max or out-range immediate constant is not applicable and illogical + # shift by zero or max or out-range immediate constant is not + # applicable and illogical for count in range(1, self._scalar_size()): # load to cast data_shl_a = self.load([a << count for a in data_a]) @@ -419,7 +420,8 @@ def test_sqrt(self): sqrt = self.sqrt(self.setall(case)) assert sqrt == pytest.approx(data_sqrt, nan_ok=True) - data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + # load to truncate precision + data_sqrt = self.load([math.sqrt(x) for x in data]) sqrt = self.sqrt(vdata) assert sqrt == data_sqrt @@ -1334,8 +1336,10 @@ def test_mask_conditional(self): for sfx in sfxes: skip_m = skip_sfx.get(sfx, skip) inhr = (cls,) - attr = {"npyv": targets[target_name], "sfx": sfx, "target_name": target_name} - tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr) + attr = {"npyv": targets[target_name], "sfx": sfx, + "target_name": target_name} + type_name = f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}" + tcls = type(type_name, inhr, attr) if skip_m: pytest.mark.skip(reason=skip_m)(tcls) globals()[tcls.__name__] = tcls diff --git a/numpy/_core/tests/test_simd_module.py b/numpy/_core/tests/test_simd_module.py index dca83fd427b6..3de1596aa10a 100644 --- a/numpy/_core/tests/test_simd_module.py +++ b/numpy/_core/tests/test_simd_module.py @@ -23,7 +23,8 @@ int_sfx = unsigned_sfx + signed_sfx all_sfx = unsigned_sfx + int_sfx -@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support") +@pytest.mark.skipif(not npyv, + reason="could not find any SIMD extension with NPYV support") class Test_SIMD_MODULE: @pytest.mark.parametrize('sfx', all_sfx) @@ -47,7 +48,8 @@ def test_raises(self): pytest.raises(TypeError, vcb("setall"), [1]) pytest.raises(TypeError, vcb("load"), 1) pytest.raises(ValueError, vcb("load"), [1]) - pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a)) + value = getattr(npyv, f"reinterpret_{sfx}_u32")(a) + pytest.raises(ValueError, vcb("store"), [1], value) 
 @pytest.mark.skipif(not npyv2, reason=(
     "could not find a second SIMD extension with NPYV support"
diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py
index 1c15c4895eaf..492894087aa9 100644
--- a/numpy/_core/tests/test_stringdtype.py
+++ b/numpy/_core/tests/test_stringdtype.py
@@ -2,26 +2,64 @@
 import itertools
 import os
 import pickle
+import string
 import sys
 import tempfile
 
 import pytest
 
 import numpy as np
-from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype
 from numpy._core.tests._natype import pd_NA
 from numpy.dtypes import StringDType
 from numpy.testing import IS_PYPY, assert_array_equal
 
 
+def random_unicode_string_list():
+    """Returns an array of 10 100-character strings containing random text"""
+    chars = list(string.ascii_letters + string.digits)
+    chars = np.array(chars, dtype="U1")
+    ret = np.random.choice(chars, size=100 * 10, replace=True)
+    return ret.view("U100")
+
+
+def get_dtype(na_object, coerce=True):
+    """Helper to work around pd_NA boolean behavior"""
+    # explicit is check for pd_NA because != with pd_NA returns pd_NA
+    if na_object is pd_NA or na_object != "unset":
+        return np.dtypes.StringDType(na_object=na_object, coerce=coerce)
+    else:
+        return np.dtypes.StringDType(coerce=coerce)
+
+
+@pytest.fixture(params=[True, False])
+def coerce(request):
+    """Coerce input to strings or raise an error for non-string input"""
+    return request.param
+
+
+@pytest.fixture(
+    params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"],
+    ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
+)
+def na_object(request):
+    """Possible values for the missing data sentinel"""
+    return request.param
+
+
+@pytest.fixture()
+def dtype(na_object, coerce):
+    """Cartesian product of missing data sentinel and string coercion options"""
+    return get_dtype(na_object, coerce)
+
 
 @pytest.fixture
 def string_list():
+    """Mix of short and long strings, some with unicode, some without"""
     return ["abc", "def", "ghi" * 10, "A¢☃€ 😊" * 100, "Abc" * 1000, "DEF"]
 
 
-# second copy for cast tests to do a cartesian product over dtypes
 @pytest.fixture(params=[True, False])
 def coerce2(request):
+    """Second copy of the coerce fixture for tests that need two instances"""
     return request.param
 
 
@@ -30,11 +68,13 @@
     ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
 )
 def na_object2(request):
+    """Second copy of the na_object fixture for tests that need two instances"""
     return request.param
 
 
 @pytest.fixture()
 def dtype2(na_object2, coerce2):
+    """Second copy of the dtype fixture for tests that need two instances"""
     # explicit is check for pd_NA because != with pd_NA returns pd_NA
     if na_object2 is pd_NA or na_object2 != "unset":
         return StringDType(na_object=na_object2, coerce=coerce2)
@@ -128,8 +168,8 @@ def test_null_roundtripping():
 
 def test_string_too_large_error():
     arr = np.array(["a", "b", "c"], dtype=StringDType())
-    with pytest.raises(MemoryError):
-        arr * (2**63 - 2)
+    with pytest.raises(OverflowError):
+        arr * (sys.maxsize + 1)
 
 
 @pytest.mark.parametrize(
@@ -267,12 +307,13 @@ def test_bytes_casts(self, dtype, strings):
             sarr.astype("S20")
 
 
-def test_additional_unicode_cast(random_string_list, dtype):
-    arr = np.array(random_string_list, dtype=dtype)
+def test_additional_unicode_cast(dtype):
+    string_list = random_unicode_string_list()
+    arr = np.array(string_list, dtype=dtype)
     # test that this short-circuits correctly
     assert_array_equal(arr, arr.astype(arr.dtype))
     # tests the casts via the comparison promoter
-    assert_array_equal(arr, arr.astype(random_string_list.dtype))
+    assert_array_equal(arr, arr.astype(string_list.dtype))
 
 
 def test_insert_scalar(dtype, string_list):
@@ -518,6 +559,25 @@ def test_fancy_indexing(string_list):
         assert_array_equal(a, b)
         assert a[0] == 'd' * 25
 
+    # see gh-29279
+    data = [
+        ["AAAAAAAAAAAAAAAAA"],
+        ["BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"],
+        ["CCCCCCCCCCCCCCCCC"],
+        ["DDDDDDDDDDDDDDDDD"],
+    ]
+    sarr = np.array(data, dtype=np.dtypes.StringDType())
+    uarr = np.array(data, dtype="U30")
+    for ind in [[0], [1], [2], [3], [[0, 0]], [[1, 1, 3]], [[1, 1]]]:
+        assert_array_equal(sarr[ind], uarr[ind])
+
+
+def test_flatiter_indexing():
+    # see gh-29659
+    arr = np.array(['hello', 'world'], dtype='T')
+    arr.flat[:] = 9223372036854775
+    assert_array_equal(arr, np.array([9223372036854775] * 2, dtype='T'))
+
 
 def test_creation_functions():
     assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""])
@@ -1193,6 +1253,24 @@ def test_growing_strings(dtype):
     assert_array_equal(arr, uarr)
 
 
+def test_assign_medium_strings():
+    # see gh-29261
+    N = 9
+    src = np.array(
+        (
+            ['0' * 256] * 3 + ['0' * 255] + ['0' * 256] + ['0' * 255]
+            + ['0' * 256] * 2 + ['0' * 255]
+        ), dtype='T')
+    dst = np.array(
+        (
+            ['0' * 255] + ['0' * 256] * 2 + ['0' * 255] + ['0' * 256]
+            + ['0' * 255] + [''] * 5
+        ), dtype='T')
+
+    dst[1:N + 1] = src
+    assert_array_equal(dst[1:N + 1], src)
+
+
 UFUNC_TEST_DATA = [
     "hello" * 10,
     "Ae¢☃€ 😊" * 20,
@@ -1594,17 +1672,17 @@ class TestImplementation:
     """
 
     @classmethod
-    def setup_class(self):
-        self.MISSING = 0x80
-        self.INITIALIZED = 0x40
-        self.OUTSIDE_ARENA = 0x20
-        self.LONG = 0x10
-        self.dtype = StringDType(na_object=np.nan)
-        self.sizeofstr = self.dtype.itemsize
-        sp = self.dtype.itemsize // 2  # pointer size = sizeof(size_t)
+    def setup_class(cls):
+        cls.MISSING = 0x80
+        cls.INITIALIZED = 0x40
+        cls.OUTSIDE_ARENA = 0x20
+        cls.LONG = 0x10
+        cls.dtype = StringDType(na_object=np.nan)
+        cls.sizeofstr = cls.dtype.itemsize
+        sp = cls.dtype.itemsize // 2  # pointer size = sizeof(size_t)
         # Below, size is not strictly correct, since it really uses
         # 7 (or 3) bytes, but good enough for the tests here. 
- self.view_dtype = np.dtype([ + cls.view_dtype = np.dtype([ ('offset', f'u{sp}'), ('size', f'u{sp // 2}'), ('xsiz', f'V{sp // 2 - 1}'), @@ -1615,13 +1693,13 @@ def setup_class(self): ('size', f'u{sp // 2}'), ('offset', f'u{sp}'), ]) - self.s_empty = "" - self.s_short = "01234" - self.s_medium = "abcdefghijklmnopqrstuvwxyz" - self.s_long = "-=+" * 100 - self.a = np.array( - [self.s_empty, self.s_short, self.s_medium, self.s_long], - self.dtype) + cls.s_empty = "" + cls.s_short = "01234" + cls.s_medium = "abcdefghijklmnopqrstuvwxyz" + cls.s_long = "-=+" * 100 + cls.a = np.array( + [cls.s_empty, cls.s_short, cls.s_medium, cls.s_long], + cls.dtype) def get_view(self, a): # Cannot view a StringDType as anything else directly, since diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index f6de208d7951..d8d23d47b5b8 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -4,7 +4,8 @@ import pytest import numpy as np -from numpy.testing import IS_PYPY, assert_array_equal, assert_raises +from numpy._core._exceptions import _UFuncNoLoopError +from numpy.testing import assert_array_equal, assert_raises from numpy.testing._private.utils import requires_memory COMPARISONS = [ @@ -18,8 +19,6 @@ MAX = np.iinfo(np.int64).max -IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16) - @pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): arr_string = np.array(["a", "b"], dtype="S") @@ -135,6 +134,7 @@ def test_string_size_dtype_large_repr(str_dt): @pytest.mark.slow @requires_memory(2 * np.iinfo(np.intc).max) @pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_string_coercion_error(str_dt): very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize try: @@ -162,6 +162,7 @@ def __str__(self): @pytest.mark.slow @requires_memory(2 * np.iinfo(np.intc).max) @pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_string_addition_error(str_dt): very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize @@ -224,9 +225,20 @@ def test_multiply_raises(self, dt): with pytest.raises(TypeError, match="unsupported type"): np.strings.multiply(np.array("abc", dtype=dt), 3.14) - with pytest.raises(MemoryError): + with pytest.raises(OverflowError): np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize) + def test_inplace_multiply(self, dt): + arr = np.array(['foo ', 'bar'], dtype=dt) + arr *= 2 + if dt != "T": + assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt)) + else: + assert_array_equal(arr, ['foo foo ', 'barbar']) + + with pytest.raises(OverflowError): + arr *= sys.maxsize + @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32, np.int64, np.int_]) def test_multiply_integer_dtypes(self, i_dt, dt): @@ -810,6 +822,20 @@ def test_expandtabs_raises_overflow(self, dt): np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) + def test_expandtabs_length_not_cause_segfault(self, dt): + # see gh-28829 + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np._core.strings._expandtabs_length.reduce(np.zeros(200)) + + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np.strings.expandtabs(np.zeros(200)) + FILL_ERROR = 
"The fill character must be exactly one character long" def test_center_raises_multiple_character_fill(self, dt): @@ -835,6 +861,7 @@ def test_rjust_raises_multiple_character_fill(self, dt): ('abc', 6, ' ', ' abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '***abc****'), ]) def test_center(self, buf, width, fillchar, res, dt): @@ -848,6 +875,7 @@ def test_center(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', 'abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', 'abc*******'), ]) def test_ljust(self, buf, width, fillchar, res, dt): @@ -861,6 +889,7 @@ def test_ljust(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', ' abc'), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '*******abc'), ]) def test_rjust(self, buf, width, fillchar, res, dt): @@ -882,6 +911,7 @@ def test_rjust(self, buf, width, fillchar, res, dt): ('-0123', 5, '-0123'), ('000', 3, '000'), ('34', 1, '34'), + ('34', -1, '34'), ('0034', 4, '0034'), ]) def test_zfill(self, buf, width, res, dt): @@ -945,17 +975,39 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): @pytest.mark.parametrize("args", [ (None,), + (None, None), + (None, None, -1), (0,), + (0, None), + (0, None, -1), (1,), + (1, None), + (1, None, -1), (3,), + (3, None), (5,), + (5, None), + (5, 5), + (5, 5, -1), (6,), # test index past the end + (6, None), + (6, None, -1), + (6, 7), # test start and stop index past the end + (4, 3), # test start > stop index (-1,), + (-1, None), + (-1, None, -1), (-3,), + (-3, None), ([3, 4],), + ([3, 4], None), ([2, 4],), ([-3, 5],), + ([-3, 5], None), + ([-3, 5], None, -1), ([0, -5],), + ([0, -5], None), + ([0, -5], None, -1), (1, 4), (-3, 5), (None, -1), @@ -965,8 +1017,16 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): (None, None, -1), ([0, 6], [-1, 0], [2, -1]), ]) - def test_slice(self, args, dt): - buf = np.array(["hello", "world"], dtype=dt) + @pytest.mark.parametrize("buf", [ + ["hello", "world"], + ['hello world', 'ÎŗÎĩΚι ΃ÎŋĪ… ÎēΌ΃ÎŧÎĩ', 'äŊ åĨŊä¸–į•Œ', '👋 🌍'], + ]) + def test_slice(self, args, buf, dt): + if dt == "S" and "äŊ åĨŊä¸–į•Œ" in buf: + pytest.skip("Bytes dtype does not support non-ascii input") + if len(buf) == 4: + args = tuple(s * 2 if isinstance(s, list) else s for s in args) + buf = np.array(buf, dtype=dt) act = np.strings.slice(buf, *args) bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) res = np.array([s[slice(*arg)] @@ -978,22 +1038,36 @@ def test_slice_unsupported(self, dt): with pytest.raises(TypeError, match="did not contain a loop"): np.strings.slice(np.array([1, 2, 3]), 4) - with pytest.raises(TypeError, match=r"Cannot cast ufunc '_slice' input .* from .* to dtype\('int(64|32)'\)"): - np.strings.slice(np.array(['foo', 'bar'], dtype=dt), np.array(['foo', 'bar'], dtype=dt)) + regexp = (r"Cannot cast ufunc '_slice' input .* " + r"from .* to dtype\('int(64|32)'\)") + with pytest.raises(TypeError, match=regexp): + np.strings.slice(np.array(['foo', 'bar'], dtype=dt), + np.array(['foo', 'bar'], dtype=dt)) - @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64]) + @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, + np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]) def test_slice_int_type_promotion(self, int_dt, dt): buf = np.array(["hello", "world"], dtype=dt) - - assert_array_equal(np.strings.slice(buf, int_dt(4)), 
np.array(["hell", "worl"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([4, 4], dtype=int_dt)), np.array(["hell", "worl"], dtype=dt)) - - assert_array_equal(np.strings.slice(buf, int_dt(2), int_dt(4)), np.array(["ll", "rl"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([2, 2], dtype=int_dt), np.array([4, 4], dtype=int_dt)), np.array(["ll", "rl"], dtype=dt)) - - assert_array_equal(np.strings.slice(buf, int_dt(0), int_dt(4), int_dt(2)), np.array(["hl", "wr"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([0, 0], dtype=int_dt), np.array([4, 4], dtype=int_dt), np.array([2, 2], dtype=int_dt)), np.array(["hl", "wr"], dtype=dt)) + np_slice = np.strings.slice + assert_array_equal(np_slice(buf, int_dt(4)), + np.array(["hell", "worl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([4, 4], dtype=int_dt)), + np.array(["hell", "worl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(2), int_dt(4)), + np.array(["ll", "rl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([2, 2], dtype=int_dt), + np.array([4, 4], dtype=int_dt)), + np.array(["ll", "rl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(0), int_dt(4), int_dt(2)), + np.array(["hl", "wr"], dtype=dt)) + assert_array_equal(np_slice(buf, + np.array([0, 0], dtype=int_dt), + np.array([4, 4], dtype=int_dt), + np.array([2, 2], dtype=int_dt)), + np.array(["hl", "wr"], dtype=dt)) @pytest.mark.parametrize("dt", ["U", "T"]) class TestMethodsWithUnicode: @@ -1047,10 +1121,7 @@ def test_replace_unicode(self, buf, old, new, count, res, dt): '\U0001D7F6', '\U00011066', '\U000104A0', - pytest.param('\U0001F107', marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISALNUM", - strict=True)), + '\U0001F107', ]) def test_isalnum_unicode(self, in_, dt): in_ = np.array(in_, dtype=dt) @@ -1064,10 +1135,7 @@ def test_isalnum_unicode(self, in_, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', True), - pytest.param('\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISLOWER", - strict=True)), + ('\U00010429', True), ('\U0001044E', True), ]) def test_islower_unicode(self, in_, out, dt): @@ -1082,10 +1150,7 @@ def test_islower_unicode(self, in_, out, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', False), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISUPPER", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ]) def test_isupper_unicode(self, in_, out, dt): @@ -1095,15 +1160,9 @@ def test_isupper_unicode(self, in_, out, dt): @pytest.mark.parametrize("in_,out", [ ('\u1FFc', True), ('Greek \u1FFcitlecases ...', True), - pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010401\U00010429', True), ('\U00010427\U0001044E', True), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ('\U0001F40D', False), ('\U0001F46F', False), diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index f2b3f5a35a37..09d01eab8186 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1,16 +1,17 
@@ import ctypes as ct +import inspect import itertools import pickle import sys import warnings -import numpy._core._operand_flag_tests as opflag_tests -import numpy._core._rational_tests as _rational_tests -import numpy._core._umath_tests as umt import pytest from pytest import param import numpy as np +import numpy._core._operand_flag_tests as opflag_tests +import numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt import numpy._core.umath as ncu import numpy.linalg._umath_linalg as uml from numpy.exceptions import AxisError @@ -26,7 +27,6 @@ assert_equal, assert_no_warnings, assert_raises, - suppress_warnings, ) from numpy.testing._private.utils import requires_memory @@ -686,8 +686,8 @@ def test_true_divide(self): tgt = float(x) / float(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: @@ -706,8 +706,8 @@ def test_true_divide(self): tgt = complex(x) / complex(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: @@ -1100,17 +1100,15 @@ def test_output_ellipsis_errors(self): match=r"out=\.\.\. is only allowed as a keyword argument."): np.add.reduce(1, (), None, ...) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + type_error = r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple" + with pytest.raises(TypeError, match=type_error): np.negative(1, out=(...,)) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + with pytest.raises(TypeError, match=type_error): # We only allow out=... 
not individual args for now np.divmod(1, 2, out=(np.empty(()), ...)) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + with pytest.raises(TypeError, match=type_error): np.add.reduce(1, out=(...,)) def test_axes_argument(self): @@ -1556,7 +1554,8 @@ def __eq__(self, other): arr1d = np.array([HasComparisons()]) assert_equal(arr1d == arr1d, np.array([True])) - assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d), np.array([True])) assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) def test_object_array_reduction(self): @@ -1712,9 +1711,6 @@ def test_where_param(self): assert_equal(a, [[0, 27], [14, 5]]) def test_where_param_buffer_output(self): - # This test is temporarily skipped because it requires - # adding masking features to the nditer to work properly - # With casting on output a = np.ones(10, np.int64) b = np.ones(10, np.int64) @@ -1726,12 +1722,12 @@ def test_where_param_alloc(self): # With casting and allocated output a = np.array([1], dtype=np.int64) m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) # No casting and allocated output a = np.array([1], dtype=np.float64) m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) def test_where_with_broadcasting(self): # See gh-17198 @@ -1745,6 +1741,17 @@ def test_where_with_broadcasting(self): assert_array_equal((a[where] < b_where), out[where].astype(bool)) assert not out[~where].any() # outside mask, out remains all 0 + def test_where_warns(self): + a = np.arange(7) + mask = a % 2 == 0 + with pytest.warns(UserWarning, match="'where' used without 'out'"): + result1 = np.add(a, a, where=mask) + # Does not warn + result2 = np.add(a, a, where=mask, out=None) + # Sanity check + assert np.all(result1[::2] == [0, 4, 8, 12]) + assert np.all(result2[::2] == [0, 4, 8, 12]) + @staticmethod def identityless_reduce_arrs(): yield np.empty((2, 3, 4), order='C') @@ -1762,10 +1769,11 @@ def identityless_reduce_arrs(): a = a[1:, 1:, 1:] yield a - @pytest.mark.parametrize("a", identityless_reduce_arrs()) + @pytest.mark.parametrize("arrs", identityless_reduce_arrs()) @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) - def test_identityless_reduction(self, a, pos): + def test_identityless_reduction(self, arrs, pos): # np.minimum.reduce is an identityless reduction + a = arrs.copy() a[...] = 1 a[pos] = 0 @@ -1789,6 +1797,7 @@ def test_identityless_reduction(self, a, pos): @requires_memory(6 * 1024**3) @pytest.mark.skipif(sys.maxsize < 2**32, reason="test array too large for 32bit platform") + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_identityless_reduction_huge_array(self): # Regression test for gh-20921 (copying identity incorrectly failed) arr = np.zeros((2, 2**31), 'uint8') @@ -1914,7 +1923,7 @@ def test_identityless_reduction_nonreorderable(self): assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) def test_reduce_zero_axis(self): - # If we have a n x m array and do a reduction with axis=1, then we are + # If we have an n x m array and do a reduction with axis=1, then we are # doing n reductions, and each reduction takes an m-element array. 
For # a reduction operation without an identity, then: # n > 0, m > 0: fine @@ -2123,15 +2132,16 @@ class ArrayPriorityMinus1000b(ArrayPriorityBase): class ArrayPriorityMinus2000(ArrayPriorityBase): __array_priority__ = -2000 - x = ArrayPriorityMinus1000(2) - xb = ArrayPriorityMinus1000b(2) - y = ArrayPriorityMinus2000(2) + x = np.ones(2).view(ArrayPriorityMinus1000) + xb = np.ones(2).view(ArrayPriorityMinus1000b) + y = np.ones(2).view(ArrayPriorityMinus2000) assert np.add(x, y) is ArrayPriorityMinus1000 assert np.add(y, x) is ArrayPriorityMinus1000 assert np.add(x, xb) is ArrayPriorityMinus1000 assert np.add(xb, x) is ArrayPriorityMinus1000b - assert np.add(np.zeros(2), ArrayPriorityMinus0(2)) is ArrayPriorityMinus0 + y_minus0 = np.zeros(2).view(ArrayPriorityMinus0) + assert np.add(np.zeros(2), y_minus0) is ArrayPriorityMinus0 assert type(np.add(xb, x, np.zeros(2))) is np.ndarray @pytest.mark.parametrize("a", ( @@ -2967,6 +2977,21 @@ def test_ufunc_input_floatingpoint_error(bad_offset): np.add(arr, arr, dtype=np.intp, casting="unsafe") +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "methodname", + ["__call__", "accumulate", "at", "outer", "reduce", "reduceat", "resolve_dtypes"], +) +def test_ufunc_method_signatures(methodname: str): + method = getattr(np.ufunc, methodname) + + try: + _ = inspect.signature(method) + except ValueError as e: + pytest.fail(e.args[0]) + + def test_trivial_loop_invalid_cast(): # This tests the fast-path "invalid cast", see gh-19904. with pytest.raises(TypeError, @@ -3004,6 +3029,45 @@ def test_reduce_casterrors(offset): assert out[()] < value * offset +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_reduction_no_reference_leak(): + # Test that the generic reduction does not leak references. 
+ # gh-29358 + arr = np.array([1, 2, 3], dtype=np.int32) + count = sys.getrefcount(arr) + + np.add.reduce(arr, dtype=np.int32, initial=0) + assert count == sys.getrefcount(arr) + + np.add.accumulate(arr, dtype=np.int32) + assert count == sys.getrefcount(arr) + + np.add.reduceat(arr, [0, 1], dtype=np.int32) + assert count == sys.getrefcount(arr) + + # with `out=` the reference count is not changed + out = np.empty((), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduce(arr, dtype=np.int32, out=out, initial=0) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty(arr.shape, dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.accumulate(arr, dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty((2,), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduceat(arr, [0, 1], dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + def test_object_reduce_cleanup_on_failure(): # Test cleanup, including of the initial value (manually provided or not) with pytest.raises(TypeError): @@ -3200,6 +3264,7 @@ def test_resolve_dtypes_reduction_errors(self): @pytest.mark.skipif(not hasattr(ct, "pythonapi"), reason="`ctypes.pythonapi` required for capsule unpacking.") + @pytest.mark.thread_unsafe(reason="modifies global object in the ctypes API") def test_loop_access(self): # This is a basic test for the full strided loop access data_t = ct.c_char_p * 2 @@ -3271,3 +3336,70 @@ def test_long_arrays(self): t[28][414] = 1 tc = np.cos(t) assert_equal(tc[0][0], tc[28][414]) + + +class TestUFuncInspectSignature: + PARAMS_COMMON = { + "casting": "same_kind", + "order": "K", + "dtype": None, + "subok": True, + "signature": None, + } + + PARAMS_UFUNC = { + "where": True, + } | PARAMS_COMMON + + PARAMS_GUFUNC = { + "axes": np._NoValue, + "axis": np._NoValue, + "keepdims": False, + } | PARAMS_COMMON + + @pytest.mark.parametrize("ufunc", [np.log, np.gcd, np.frexp, np.divmod, np.matvec]) + def test_dunder_signature_attr(self, ufunc: np.ufunc): + assert hasattr(ufunc, "__signature__") + assert isinstance(ufunc.__signature__, inspect.Signature) + assert inspect.signature(ufunc) == ufunc.__signature__ + + @pytest.mark.parametrize("ufunc", [np.exp, np.mod, np.frexp, np.divmod, np.vecmat]) + def test_params_common_positional(self, ufunc: np.ufunc): + sig = inspect.signature(ufunc) + + # check positional-only parameters + posonly_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.POSITIONAL_ONLY} + assert len(posonly_params) == ufunc.nin + assert all(default is inspect.Parameter.empty + for default in posonly_params.values()) + + # check 'out' parameter + out_param = sig.parameters.get("out") + assert out_param is not None + assert out_param.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + @pytest.mark.parametrize("ufunc", [np.sin, np.add, np.frexp, np.divmod]) + def test_params_common_ufunc(self, ufunc: np.ufunc): + assert ufunc.signature is None # sanity check + + sig = inspect.signature(ufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_UFUNC + + @pytest.mark.parametrize("gufunc", [np.matmul, np.matvec, np.vecdot, np.vecmat]) + def test_params_common_gufunc(self, gufunc: np.ufunc): + assert gufunc.signature is not 
None # sanity check + + sig = inspect.signature(gufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_GUFUNC diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 4b698ce82bc6..40b815f88984 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1,4 +1,5 @@ import fnmatch +import inspect import itertools import operator import platform @@ -12,8 +13,7 @@ import numpy as np import numpy._core.umath as ncu -from numpy._core import _umath_tests as ncu_tests -from numpy._core import sctypes +from numpy._core import _umath_tests as ncu_tests, sctypes from numpy.testing import ( HAS_REFCOUNT, IS_MUSL, @@ -31,7 +31,6 @@ assert_no_warnings, assert_raises, assert_raises_regex, - suppress_warnings, ) from numpy.testing._private.utils import _glibc_older_than @@ -704,8 +703,8 @@ def test_floor_division_corner_cases(self, dtype): fone = np.array(1.0, dtype=dtype) fzer = np.array(0.0, dtype=dtype) finf = np.array(np.inf, dtype=dtype) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in floor_divide") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in floor_divide", RuntimeWarning) div = np.floor_divide(fnan, fone) assert np.isnan(div), f"div: {div}" div = np.floor_divide(fone, fnan) @@ -860,9 +859,9 @@ def test_float_divmod_corner_cases(self): fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in divmod") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in divmod", RuntimeWarning) + warnings.filterwarnings('ignore', "divide by zero encountered in divmod", RuntimeWarning) div, rem = np.divmod(fone, fzer) assert np.isinf(div), f'dt: {dt}, div: {rem}' assert np.isnan(rem), f'dt: {dt}, rem: {rem}' @@ -899,9 +898,9 @@ def test_float_remainder_corner_cases(self): assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "invalid value encountered in fmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in remainder", RuntimeWarning) + warnings.filterwarnings('ignore', "invalid value encountered in fmod", RuntimeWarning) for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) @@ -1879,8 +1878,15 @@ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms + # - spacing/float32,float64 on Windows MSVC with VS2022 if ufunc in (np.spacing, np.ceil) and dtype == 'e': return + # Skip spacing tests with NaN on Windows MSVC (all dtypes) + import platform + if (ufunc == np.spacing and + platform.system() == 'Windows' and + any(np.isnan(d) if isinstance(d, (int, float)) else False for d in data)): + pytest.skip("spacing with NaN generates warnings on Windows/VS2022") array = np.array(data, dtype=dtype) with assert_no_warnings(): ufunc(array) @@ -2951,9 +2957,11 @@ def test_minmax_blocked(self): inp[:] = np.arange(inp.size, dtype=dt) 
inp[i] = np.nan emsg = lambda: f'{inp!r}\n{msg}' - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) assert_(np.isnan(inp.max()), msg=emsg) assert_(np.isnan(inp.min()), msg=emsg) @@ -3149,7 +3157,8 @@ def do_test(f_call, f_expected): do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) # Also check the where mask handling: - do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0)) + out = np.zeros([1], dtype=float) + do_test(lambda a: np.add(a, 0, where=False, out=None), lambda a: (a, 0)) do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a)) def test_wrap_with_iterable(self): @@ -3706,7 +3715,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): kwargs = kwargs.copy() if "out" in kwargs: - kwargs["out"] = self._unwrap(kwargs["out"]) + kwargs["out"] = self._unwrap(kwargs["out"])[0] if kwargs["out"] is NotImplemented: return NotImplemented @@ -3737,21 +3746,28 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): array = np.array([1, 2, 3]) where = np.array([True, False, True]) - expected = ufunc(array, where=where) + out = np.zeros(3, dtype=array.dtype) + expected = ufunc(array, where=where, out=out) with pytest.raises(TypeError): - ufunc(array, where=where.view(OverriddenArrayOld)) + ufunc( + array, + where=where.view(OverriddenArrayOld), + out=out, + ) result_1 = ufunc( array, - where=where.view(OverriddenArrayNew) + where=where.view(OverriddenArrayNew), + out=out, ) assert isinstance(result_1, OverriddenArrayNew) assert np.all(np.array(result_1) == expected, where=where) result_2 = ufunc( array.view(OverriddenArrayNew), - where=where.view(OverriddenArrayNew) + where=where.view(OverriddenArrayNew), + out=out.view(OverriddenArrayNew), ) assert isinstance(result_2, OverriddenArrayNew) assert np.all(np.array(result_2) == expected, where=where) @@ -4031,12 +4047,15 @@ def test_array_ufunc_direct_call(self): res = a.__array_ufunc__(np.add, "__call__", a, a) assert_array_equal(res, a + a) + @pytest.mark.thread_unsafe(reason="modifies global module") + @pytest.mark.skipif(IS_PYPY, reason="__signature__ descriptor dance fails") def test_ufunc_docstring(self): original_doc = np.add.__doc__ new_doc = "new docs" expected_dict = ( {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} ) + expected_dict["__signature__"] = inspect.signature(np.add) np.add.__doc__ = new_doc assert np.add.__doc__ == new_doc @@ -4597,8 +4616,8 @@ def test_nextafter_0(): for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)): # The value of tiny for double double is NaN, so we need to pass the # assert - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(t).tiny): tiny = np.finfo(t).tiny assert_( @@ -4701,6 +4720,18 @@ def test_reduceat(): np.setbufsize(ncu.UFUNC_BUFSIZE_DEFAULT) assert_array_almost_equal(h1, h2) +def test_negative_value_raises(): + with pytest.raises(ValueError, match="buffer size must be non-negative"): + np.setbufsize(-5) + + old = np.getbufsize() + try: + prev = np.setbufsize(4096) + assert prev == old + assert np.getbufsize() == 4096 + finally: + np.setbufsize(old) + def test_reduceat_empty(): """Reduceat should work with empty arrays""" indices = np.array([], 'i4') @@ -4865,6 +4896,15 @@ def 
test_bad_legacy_ufunc_silent_errors(): ncu_tests.always_error.at(arr, [0, 1, 2], arr) +def test_bad_legacy_unary_ufunc_silent_errors(): + # Unary has a special scalar path right now, so test it explicitly. + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(np.arange(3).astype(np.float64)) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(1.5) + + @pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) def test_bad_legacy_gufunc_silent_errors(x1): # Verify that an exception raised in a gufunc loop propagates correctly. @@ -4911,3 +4951,12 @@ def test_ufunc_arg(self): @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") def test_string_arg(self): assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) + +class TestHypotErrorMessages: + def test_hypot_error_message_single_arg(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 1 was given"): + np.hypot(5) + + def test_hypot_error_message_multiple_args(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 4 were given"): + np.hypot(1, 2, 3, 4) diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 5707e9279d5b..3ca2f508672e 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -4,9 +4,9 @@ from os import path import pytest -from numpy._core._multiarray_umath import __cpu_features__ import numpy as np +from numpy._core._multiarray_umath import __cpu_features__ from numpy.testing import assert_array_max_ulp from numpy.testing._private.utils import _glibc_older_than @@ -68,8 +68,16 @@ def test_validate_transcendentals(self): npfunc = getattr(np, npname) for datatype in np.unique(data['type']): data_subset = data[data['type'] == datatype] - inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) - outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + data_input_str = data_subset['input'].astype(str) + data_output_str = data_subset['output'].astype(str) + data_type_str = data_subset['type'].astype(str) + + inval = np.array(str_to_float(data_input_str, + data_type_str), + dtype=eval(datatype)) + outval = np.array(str_to_float(data_output_str, + data_type_str), + dtype=eval(datatype)) perm = np.random.permutation(len(inval)) inval = inval[perm] outval = outval[perm] diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index a97af475def4..7012e7e357fe 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -1,11 +1,12 @@ import platform import sys -# import the c-extension module directly since _arg is not exported via umath -import numpy._core._multiarray_umath as ncu import pytest import numpy as np + +# import the c-extension module directly since _arg is not exported via umath +import numpy._core._multiarray_umath as ncu from numpy.testing import ( assert_almost_equal, assert_array_equal, @@ -561,31 +562,35 @@ class TestSpecialComplexAVX: @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) def test_array(self, stride, astype): - arr = np.array([complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(np.inf, np.nan), - complex(np.inf, np.inf), - complex(0., np.inf), - complex(np.inf, 0.), - 
complex(0., 0.), - complex(0., np.nan), - complex(np.nan, 0.)], dtype=astype) - abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype) - sq_true = np.array([complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(-np.inf, np.nan), - complex(np.inf, np.nan), - complex(0., 0.), - complex(np.nan, np.nan), - complex(np.nan, np.nan)], dtype=astype) + nan = np.nan + inf = np.inf + arr = np.array([complex(nan, nan), + complex(nan, inf), + complex(inf, nan), + complex(inf, inf), + complex(0., inf), + complex(inf, 0.), + complex(0., 0.), + complex(0., nan), + complex(nan, 0.)], dtype=astype) + abs_true = np.array([nan, inf, inf, inf, inf, inf, 0., nan, nan], + dtype=arr.real.dtype) + sq_true = np.array([complex(nan, nan), + complex(nan, nan), + complex(nan, nan), + complex(nan, inf), + complex(-inf, nan), + complex(inf, nan), + complex(0., 0.), + complex(nan, nan), + complex(nan, nan)], dtype=astype) with np.errstate(invalid='ignore'): assert_equal(np.abs(arr[::stride]), abs_true[::stride]) assert_equal(np.square(arr[::stride]), sq_true[::stride]) class TestComplexAbsoluteAVX: - @pytest.mark.parametrize("arraysize", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) + @pytest.mark.parametrize("arraysize", + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) # test to ensure masking and strides work as intended in the AVX implementation diff --git a/numpy/_core/tests/test_unicode.py b/numpy/_core/tests/test_unicode.py index 9fdc55b0e322..6a86503a35ae 100644 --- a/numpy/_core/tests/test_unicode.py +++ b/numpy/_core/tests/test_unicode.py @@ -134,7 +134,8 @@ def test_valuesSD(self): def test_valuesMD(self): # Check creation of multi-dimensional objects with values - ua = np.array([[[self.ucs_value * self.ulen] * 2] * 3] * 4, dtype=f'U{self.ulen}') + data = [[[self.ucs_value * self.ulen] * 2] * 3] * 4 + ua = np.array(data, dtype=f'U{self.ulen}') self.content_check(ua, ua[0, 0, 0], 4 * self.ulen * 2 * 3 * 4) self.content_check(ua, ua[-1, -1, -1], 4 * self.ulen * 2 * 3 * 4) diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index 94f97c059187..cc4b8a1238f0 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -53,8 +53,8 @@ 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', - 'logical_or', 'logical_xor', 'matvec', 'maximum', 'minimum', 'mod', 'modf', - 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', + 'logical_or', 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'mod', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot', 'vecmat'] diff --git a/numpy/_core/umath.pyi b/numpy/_core/umath.pyi index d9f0d384cf6d..498a0af2c008 100644 --- a/numpy/_core/umath.pyi +++ b/numpy/_core/umath.pyi @@ -1,3 +1,9 @@ +import contextvars +from _typeshed import SupportsWrite +from collections.abc import Callable +from typing import Any, Final, Literal, TypedDict, Unpack, type_check_only +from typing_extensions import CapsuleType + from numpy 
import ( absolute, add, @@ -63,6 +69,7 @@ from numpy import ( logical_not, logical_or, logical_xor, + matmul, matvec, maximum, minimum, @@ -162,6 +169,7 @@ __all__ = [ "logical_not", "logical_or", "logical_xor", + "matmul", "matvec", "maximum", "minimum", @@ -195,3 +203,30 @@ __all__ = [ "vecdot", "vecmat", ] + +### + +type _ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +type _ErrCall = Callable[[str, int], Any] | SupportsWrite[str] + +@type_check_only +class _ExtObjDict(TypedDict, total=False): + divide: _ErrKind + over: _ErrKind + under: _ErrKind + invalid: _ErrKind + call: _ErrCall | None + bufsize: int + +# re-exports from `_core._multiarray_umath` that are used by `_core._ufunc_config` + +NAN: Final[float] = float("nan") +PINF: Final[float] = float("+inf") +NINF: Final[float] = float("-inf") +PZERO: Final[float] = +0.0 +NZERO: Final[float] = -0.0 +_UFUNC_API: Final[CapsuleType] = ... +_extobj_contextvar: Final[contextvars.ContextVar[CapsuleType]] = ... + +def _get_extobj_dict() -> _ExtObjDict: ... +def _make_extobj(*, all: _ErrKind = ..., **kwargs: Unpack[_ExtObjDict]) -> CapsuleType: ... diff --git a/numpy/_expired_attrs_2_0.py b/numpy/_expired_attrs_2_0.py index 1397134e3f8c..2eebf95bc558 100644 --- a/numpy/_expired_attrs_2_0.py +++ b/numpy/_expired_attrs_2_0.py @@ -61,7 +61,6 @@ "or use `typing.deprecated`.", "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " "directly, or use `typing.deprecated`.", - "disp": "Use your own printing function instead.", "find_common_type": "Use `numpy.promote_types` or `numpy.result_type` instead. " "To achieve semantics for the `scalar_types` argument, use " diff --git a/numpy/_expired_attrs_2_0.pyi b/numpy/_expired_attrs_2_0.pyi index 14524689c1c5..de6c2d10f9b0 100644 --- a/numpy/_expired_attrs_2_0.pyi +++ b/numpy/_expired_attrs_2_0.pyi @@ -47,7 +47,6 @@ class _ExpiredAttributesType(TypedDict): recfromtxt: str deprecate: str deprecate_with_doc: str - disp: str find_common_type: str round_: str get_array_wrap: str diff --git a/numpy/_globals.py b/numpy/_globals.py index 5f838ba91544..ada8d5c41af0 100644 --- a/numpy/_globals.py +++ b/numpy/_globals.py @@ -94,3 +94,28 @@ def __bool__(self): return False raise ValueError(f"{self} is neither True nor False.") + + +class _SignatureDescriptor: + # A descriptor to store on the ufunc __dict__ that avoids defining a + # signature for the ufunc class/type but allows the instance to have one. + # This is needed because inspect.signature() chokes on normal properties + # (as of 3.14 at least). + # We could also set __signature__ on the instance but this allows deferred + # computation of the signature. + def __get__(self, obj, objtype=None): + # Delay import, not a critical path but need to avoid circular import. + from numpy._core._internal import _ufunc_inspect_signature_builder + + if obj is None: + # could also return None, which is accepted as "not set" by + # inspect.signature(). + raise AttributeError( + "type object 'numpy.ufunc' has no attribute '__signature__'") + + # Store on the instance, after this the descriptor won't be used. 
+ obj.__signature__ = _ufunc_inspect_signature_builder(obj) + return obj.__signature__ + + +_signature_descriptor = _SignatureDescriptor() diff --git a/numpy/_pyinstaller/hook-numpy.py b/numpy/_pyinstaller/hook-numpy.py index 61c224b33810..b6f15e6edde8 100644 --- a/numpy/_pyinstaller/hook-numpy.py +++ b/numpy/_pyinstaller/hook-numpy.py @@ -32,5 +32,4 @@ "f2py", "setuptools", "distutils", - "numpy.distutils", ] diff --git a/numpy/_pyinstaller/hook-numpy.pyi b/numpy/_pyinstaller/hook-numpy.pyi index 2642996dad7e..6da4914d7e5a 100644 --- a/numpy/_pyinstaller/hook-numpy.pyi +++ b/numpy/_pyinstaller/hook-numpy.pyi @@ -1,13 +1,6 @@ from typing import Final -# from `PyInstaller.compat` -is_conda: Final[bool] -is_pure_conda: Final[bool] +binaries: Final[list[tuple[str, str]]] = ... -# from `PyInstaller.utils.hooks` -def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... - -binaries: Final[list[tuple[str, str]]] - -hiddenimports: Final[list[str]] -excludedimports: Final[list[str]] +hiddenimports: Final[list[str]] = ... +excludedimports: Final[list[str]] = ... diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 77342e44aea0..25f5300a74ac 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -123,8 +123,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, True """ - import warnings - import pytest module = sys.modules[self.module_name] @@ -136,14 +134,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, # offset verbosity. The "-q" cancels a "-v". pytest_args += ["-q"] - if sys.version_info < (3, 12): - with warnings.catch_warnings(): - warnings.simplefilter("always") - # Filter out distutils cpu warnings (could be localized to - # distutils tests). ASV has problems with top level import, - # so fetch module for suppression here. - from numpy.distutils import cpuinfo # noqa: F401 - # Filter out annoying import messages. Want these in both develop and # release mode. pytest_args += [ diff --git a/numpy/_pytesttester.pyi b/numpy/_pytesttester.pyi index a12abb1c1a10..bd71239314b4 100644 --- a/numpy/_pytesttester.pyi +++ b/numpy/_pytesttester.pyi @@ -8,11 +8,11 @@ class PytestTester: def __init__(self, module_name: str) -> None: ... def __call__( self, - label: L["fast", "full"] = ..., - verbose: int = ..., - extra_argv: Iterable[str] | None = ..., - doctests: L[False] = ..., - coverage: bool = ..., - durations: int = ..., - tests: Iterable[str] | None = ..., + label: L["fast", "full"] = "fast", + verbose: int = 1, + extra_argv: Iterable[str] | None = None, + doctests: L[False] = False, + coverage: bool = False, + durations: int = -1, + tests: Iterable[str] | None = None, ) -> bool: ... 
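
Note: the `_SignatureDescriptor` added to `numpy/_globals.py` above is an instance of the lazy-attribute pattern built on a non-data descriptor: because it defines only `__get__`, the first access computes the value and writes it into the instance `__dict__`, after which ordinary attribute lookup wins and the descriptor is never consulted again. The following is a minimal standalone sketch of that pattern, not the patch's actual code; `_LazySignature`, `Demo`, and the one-parameter toy signature are illustrative stand-ins for the real builder in `numpy._core._internal._ufunc_inspect_signature_builder`.

import inspect

class _LazySignature:
    # Non-data descriptor (no __set__/__delete__): once the instance
    # attribute is written below, later lookups bypass this __get__.
    def __get__(self, obj, objtype=None):
        if obj is None:
            # Class-level access: raise so inspect.signature() treats
            # the class itself as having no precomputed signature.
            raise AttributeError("__signature__")
        # Compute once and cache on the instance.
        obj.__signature__ = inspect.Signature(
            [inspect.Parameter("x", inspect.Parameter.POSITIONAL_ONLY)])
        return obj.__signature__

class Demo:
    __signature__ = _LazySignature()

    def __call__(self, x, /):
        return x

d = Demo()
print(inspect.signature(d))           # (x, /) -- computed on first access
assert "__signature__" in d.__dict__  # cached; descriptor no longer used

The same deferral is what the patch exploits: building a ufunc's `inspect.Signature` is only paid for by the ufuncs whose signature is actually requested.
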
diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 16a7eee66ebd..4de797bd4e37 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,148 +1,148 @@ """Private counterpart of ``numpy.typing``.""" -from ._array_like import ArrayLike as ArrayLike -from ._array_like import NDArray as NDArray -from ._array_like import _ArrayLike as _ArrayLike -from ._array_like import _ArrayLikeAnyString_co as _ArrayLikeAnyString_co -from ._array_like import _ArrayLikeBool_co as _ArrayLikeBool_co -from ._array_like import _ArrayLikeBytes_co as _ArrayLikeBytes_co -from ._array_like import _ArrayLikeComplex128_co as _ArrayLikeComplex128_co -from ._array_like import _ArrayLikeComplex_co as _ArrayLikeComplex_co -from ._array_like import _ArrayLikeDT64_co as _ArrayLikeDT64_co -from ._array_like import _ArrayLikeFloat64_co as _ArrayLikeFloat64_co -from ._array_like import _ArrayLikeFloat_co as _ArrayLikeFloat_co -from ._array_like import _ArrayLikeInt as _ArrayLikeInt -from ._array_like import _ArrayLikeInt_co as _ArrayLikeInt_co -from ._array_like import _ArrayLikeNumber_co as _ArrayLikeNumber_co -from ._array_like import _ArrayLikeObject_co as _ArrayLikeObject_co -from ._array_like import _ArrayLikeStr_co as _ArrayLikeStr_co -from ._array_like import _ArrayLikeString_co as _ArrayLikeString_co -from ._array_like import _ArrayLikeTD64_co as _ArrayLikeTD64_co -from ._array_like import _ArrayLikeUInt_co as _ArrayLikeUInt_co -from ._array_like import _ArrayLikeVoid_co as _ArrayLikeVoid_co -from ._array_like import _FiniteNestedSequence as _FiniteNestedSequence -from ._array_like import _SupportsArray as _SupportsArray -from ._array_like import _SupportsArrayFunc as _SupportsArrayFunc +from ._array_like import ( + ArrayLike as ArrayLike, + NDArray as NDArray, + _ArrayLike as _ArrayLike, + _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeInt as _ArrayLikeInt, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeString_co as _ArrayLikeString_co, + _ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, +) # -from ._char_codes import _BoolCodes as _BoolCodes -from ._char_codes import _ByteCodes as _ByteCodes -from ._char_codes import _BytesCodes as _BytesCodes -from ._char_codes import _CDoubleCodes as _CDoubleCodes -from ._char_codes import _CharacterCodes as _CharacterCodes -from ._char_codes import _CLongDoubleCodes as _CLongDoubleCodes -from ._char_codes import _Complex64Codes as _Complex64Codes -from ._char_codes import _Complex128Codes as _Complex128Codes -from ._char_codes import _ComplexFloatingCodes as _ComplexFloatingCodes -from ._char_codes import _CSingleCodes as _CSingleCodes -from ._char_codes import _DoubleCodes as _DoubleCodes -from ._char_codes import _DT64Codes as _DT64Codes -from ._char_codes import _FlexibleCodes as _FlexibleCodes -from ._char_codes import _Float16Codes as _Float16Codes -from ._char_codes 
import _Float32Codes as _Float32Codes -from ._char_codes import _Float64Codes as _Float64Codes -from ._char_codes import _FloatingCodes as _FloatingCodes -from ._char_codes import _GenericCodes as _GenericCodes -from ._char_codes import _HalfCodes as _HalfCodes -from ._char_codes import _InexactCodes as _InexactCodes -from ._char_codes import _Int8Codes as _Int8Codes -from ._char_codes import _Int16Codes as _Int16Codes -from ._char_codes import _Int32Codes as _Int32Codes -from ._char_codes import _Int64Codes as _Int64Codes -from ._char_codes import _IntCCodes as _IntCCodes -from ._char_codes import _IntCodes as _IntCodes -from ._char_codes import _IntegerCodes as _IntegerCodes -from ._char_codes import _IntPCodes as _IntPCodes -from ._char_codes import _LongCodes as _LongCodes -from ._char_codes import _LongDoubleCodes as _LongDoubleCodes -from ._char_codes import _LongLongCodes as _LongLongCodes -from ._char_codes import _NumberCodes as _NumberCodes -from ._char_codes import _ObjectCodes as _ObjectCodes -from ._char_codes import _ShortCodes as _ShortCodes -from ._char_codes import _SignedIntegerCodes as _SignedIntegerCodes -from ._char_codes import _SingleCodes as _SingleCodes -from ._char_codes import _StrCodes as _StrCodes -from ._char_codes import _StringCodes as _StringCodes -from ._char_codes import _TD64Codes as _TD64Codes -from ._char_codes import _UByteCodes as _UByteCodes -from ._char_codes import _UInt8Codes as _UInt8Codes -from ._char_codes import _UInt16Codes as _UInt16Codes -from ._char_codes import _UInt32Codes as _UInt32Codes -from ._char_codes import _UInt64Codes as _UInt64Codes -from ._char_codes import _UIntCCodes as _UIntCCodes -from ._char_codes import _UIntCodes as _UIntCodes -from ._char_codes import _UIntPCodes as _UIntPCodes -from ._char_codes import _ULongCodes as _ULongCodes -from ._char_codes import _ULongLongCodes as _ULongLongCodes -from ._char_codes import _UnsignedIntegerCodes as _UnsignedIntegerCodes -from ._char_codes import _UShortCodes as _UShortCodes -from ._char_codes import _VoidCodes as _VoidCodes +from ._char_codes import ( + _BoolCodes as _BoolCodes, + _BytesCodes as _BytesCodes, + _CharacterCodes as _CharacterCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ComplexFloatingCodes as _ComplexFloatingCodes, + _DT64Codes as _DT64Codes, + _FlexibleCodes as _FlexibleCodes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _FloatingCodes as _FloatingCodes, + _GenericCodes as _GenericCodes, + _InexactCodes as _InexactCodes, + _Int8Codes as _Int8Codes, + _Int16Codes as _Int16Codes, + _Int32Codes as _Int32Codes, + _Int64Codes as _Int64Codes, + _IntCCodes as _IntCCodes, + _IntegerCodes as _IntegerCodes, + _IntPCodes as _IntPCodes, + _LongCodes as _LongCodes, + _LongDoubleCodes as _LongDoubleCodes, + _LongLongCodes as _LongLongCodes, + _NumberCodes as _NumberCodes, + _ObjectCodes as _ObjectCodes, + _SignedIntegerCodes as _SignedIntegerCodes, + _StrCodes as _StrCodes, + _StringCodes as _StringCodes, + _TD64Codes as _TD64Codes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, + _UIntCCodes as _UIntCCodes, + _UIntPCodes as _UIntPCodes, + _ULongCodes as _ULongCodes, + _ULongLongCodes as _ULongLongCodes, + _UnsignedIntegerCodes as _UnsignedIntegerCodes, + _VoidCodes as _VoidCodes, +) # -from ._dtype_like import DTypeLike as DTypeLike -from ._dtype_like 
import _DTypeLike as _DTypeLike -from ._dtype_like import _DTypeLikeBool as _DTypeLikeBool -from ._dtype_like import _DTypeLikeBytes as _DTypeLikeBytes -from ._dtype_like import _DTypeLikeComplex as _DTypeLikeComplex -from ._dtype_like import _DTypeLikeComplex_co as _DTypeLikeComplex_co -from ._dtype_like import _DTypeLikeDT64 as _DTypeLikeDT64 -from ._dtype_like import _DTypeLikeFloat as _DTypeLikeFloat -from ._dtype_like import _DTypeLikeInt as _DTypeLikeInt -from ._dtype_like import _DTypeLikeObject as _DTypeLikeObject -from ._dtype_like import _DTypeLikeStr as _DTypeLikeStr -from ._dtype_like import _DTypeLikeTD64 as _DTypeLikeTD64 -from ._dtype_like import _DTypeLikeUInt as _DTypeLikeUInt -from ._dtype_like import _DTypeLikeVoid as _DTypeLikeVoid -from ._dtype_like import _SupportsDType as _SupportsDType -from ._dtype_like import _VoidDTypeLike as _VoidDTypeLike +from ._dtype_like import ( + DTypeLike as DTypeLike, + _DTypeLike as _DTypeLike, + _DTypeLikeBool as _DTypeLikeBool, + _DTypeLikeBytes as _DTypeLikeBytes, + _DTypeLikeComplex as _DTypeLikeComplex, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, + _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeInt as _DTypeLikeInt, + _DTypeLikeObject as _DTypeLikeObject, + _DTypeLikeStr as _DTypeLikeStr, + _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeVoid as _DTypeLikeVoid, + _HasDType as _HasDType, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, +) # -from ._nbit import _NBitByte as _NBitByte -from ._nbit import _NBitDouble as _NBitDouble -from ._nbit import _NBitHalf as _NBitHalf -from ._nbit import _NBitInt as _NBitInt -from ._nbit import _NBitIntC as _NBitIntC -from ._nbit import _NBitIntP as _NBitIntP -from ._nbit import _NBitLong as _NBitLong -from ._nbit import _NBitLongDouble as _NBitLongDouble -from ._nbit import _NBitLongLong as _NBitLongLong -from ._nbit import _NBitShort as _NBitShort -from ._nbit import _NBitSingle as _NBitSingle +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitDouble as _NBitDouble, + _NBitHalf as _NBitHalf, + _NBitInt as _NBitInt, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitLong as _NBitLong, + _NBitLongDouble as _NBitLongDouble, + _NBitLongLong as _NBitLongLong, + _NBitShort as _NBitShort, + _NBitSingle as _NBitSingle, +) # -from ._nbit_base import ( - NBitBase as NBitBase, # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +from ._nbit_base import ( # type: ignore[deprecated] + NBitBase as NBitBase, # pyright: ignore[reportDeprecated] + _8Bit as _8Bit, + _16Bit as _16Bit, + _32Bit as _32Bit, + _64Bit as _64Bit, + _96Bit as _96Bit, + _128Bit as _128Bit, ) -from ._nbit_base import _8Bit as _8Bit -from ._nbit_base import _16Bit as _16Bit -from ._nbit_base import _32Bit as _32Bit -from ._nbit_base import _64Bit as _64Bit -from ._nbit_base import _96Bit as _96Bit -from ._nbit_base import _128Bit as _128Bit # from ._nested_sequence import _NestedSequence as _NestedSequence # -from ._scalars import _BoolLike_co as _BoolLike_co -from ._scalars import _CharLike_co as _CharLike_co -from ._scalars import _ComplexLike_co as _ComplexLike_co -from ._scalars import _FloatLike_co as _FloatLike_co -from ._scalars import _IntLike_co as _IntLike_co -from ._scalars import _NumberLike_co as _NumberLike_co -from ._scalars import _ScalarLike_co as _ScalarLike_co -from ._scalars import _TD64Like_co as _TD64Like_co -from ._scalars import _UIntLike_co as _UIntLike_co -from ._scalars import _VoidLike_co as 
_VoidLike_co +from ._scalars import ( + _BoolLike_co as _BoolLike_co, + _CharLike_co as _CharLike_co, + _ComplexLike_co as _ComplexLike_co, + _FloatLike_co as _FloatLike_co, + _IntLike_co as _IntLike_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _TD64Like_co as _TD64Like_co, + _UIntLike_co as _UIntLike_co, + _VoidLike_co as _VoidLike_co, +) # -from ._shape import _AnyShape as _AnyShape -from ._shape import _Shape as _Shape -from ._shape import _ShapeLike as _ShapeLike +from ._shape import _AnyShape as _AnyShape, _Shape as _Shape, _ShapeLike as _ShapeLike # -from ._ufunc import _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1 -from ._ufunc import _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1 -from ._ufunc import _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2 -from ._ufunc import _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1 -from ._ufunc import _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2 +from ._ufunc import ( + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, +) diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 5330a6b3b715..883b890a1a16 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -137,10 +137,10 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[~_ScalarT]] + NDArray >>> print(npt.NDArray[np.float64]) - numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[numpy.float64]] + NDArray[numpy.float64] >>> NDArrayInt = npt.NDArray[np.int_] >>> a: NDArrayInt = np.arange(10) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 6b071f4a0319..b8cb2c7872c1 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -1,27 +1,18 @@ -import sys -from collections.abc import Callable, Collection, Sequence -from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, TypeVar, runtime_checkable +from collections.abc import Buffer, Callable, Collection, Sequence +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable import numpy as np -from numpy import dtype - -from ._nbit_base import _32Bit, _64Bit -from ._nested_sequence import _NestedSequence -from ._shape import _AnyShape if TYPE_CHECKING: - StringDType = np.dtypes.StringDType + from numpy.dtypes import StringDType else: - # at runtime outside of type checking importing this from numpy.dtypes - # would lead to a circular import from numpy._core.multiarray import StringDType -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) -_DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any]) +from ._nbit_base import _32Bit, _64Bit +from ._nested_sequence import _NestedSequence +from ._shape import _AnyShape -NDArray: TypeAlias = np.ndarray[_AnyShape, dtype[_ScalarT]] +type NDArray[ScalarT: np.generic] = np.ndarray[_AnyShape, np.dtype[ScalarT]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned @@ -29,8 +20,8 @@ # Concrete implementations of the protocol are responsible for adding # any and all remaining overloads @runtime_checkable -class _SupportsArray(Protocol[_DTypeT_co]): - def __array__(self) -> np.ndarray[Any, _DTypeT_co]: ... 
+class _SupportsArray[DTypeT: np.dtype](Protocol): + def __array__(self) -> np.ndarray[Any, DTypeT]: ... @runtime_checkable @@ -46,61 +37,54 @@ def __array_function__( # TODO: Wait until mypy supports recursive objects in combination with typevars -_FiniteNestedSequence: TypeAlias = ( - _T - | Sequence[_T] - | Sequence[Sequence[_T]] - | Sequence[Sequence[Sequence[_T]]] - | Sequence[Sequence[Sequence[Sequence[_T]]]] +type _FiniteNestedSequence[T] = ( + T + | Sequence[T] + | Sequence[Sequence[T]] + | Sequence[Sequence[Sequence[T]]] + | Sequence[Sequence[Sequence[Sequence[T]]]] ) # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` -_ArrayLike: TypeAlias = ( - _SupportsArray[dtype[_ScalarT]] - | _NestedSequence[_SupportsArray[dtype[_ScalarT]]] +type _ArrayLike[ScalarT: np.generic] = ( + _SupportsArray[np.dtype[ScalarT]] + | _NestedSequence[_SupportsArray[np.dtype[ScalarT]]] ) # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. `np.dtype` # and another one for the rest -_DualArrayLike: TypeAlias = ( - _SupportsArray[_DTypeT] - | _NestedSequence[_SupportsArray[_DTypeT]] - | _T - | _NestedSequence[_T] +type _DualArrayLike[DTypeT: np.dtype, BuiltinT] = ( + _SupportsArray[DTypeT] + | _NestedSequence[_SupportsArray[DTypeT]] + | BuiltinT + | _NestedSequence[BuiltinT] ) -if sys.version_info >= (3, 12): - from collections.abc import Buffer as _Buffer -else: - @runtime_checkable - class _Buffer(Protocol): - def __buffer__(self, flags: int, /) -> memoryview: ... - -ArrayLike: TypeAlias = _Buffer | _DualArrayLike[dtype[Any], complex | bytes | str] +type ArrayLike = Buffer | _DualArrayLike[np.dtype, complex | bytes | str] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` -_ArrayLikeBool_co: TypeAlias = _DualArrayLike[dtype[np.bool], bool] -_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.unsignedinteger], bool] -_ArrayLikeInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer], int] -_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.floating], float] -_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.number], complex] -_ArrayLikeNumber_co: TypeAlias = _ArrayLikeComplex_co -_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.timedelta64], int] -_ArrayLikeDT64_co: TypeAlias = _ArrayLike[np.datetime64] -_ArrayLikeObject_co: TypeAlias = _ArrayLike[np.object_] - -_ArrayLikeVoid_co: TypeAlias = _ArrayLike[np.void] -_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[dtype[np.bytes_], bytes] -_ArrayLikeStr_co: TypeAlias = _DualArrayLike[dtype[np.str_], str] -_ArrayLikeString_co: TypeAlias = _DualArrayLike[StringDType, str] -_ArrayLikeAnyString_co: TypeAlias = _DualArrayLike[dtype[np.character] | StringDType, bytes | str] - -__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool -__Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool -_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float] -_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex] +type _ArrayLikeBool_co = _DualArrayLike[np.dtype[np.bool], bool] +type _ArrayLikeUInt_co = _DualArrayLike[np.dtype[np.bool | np.unsignedinteger], bool] +type _ArrayLikeInt_co = _DualArrayLike[np.dtype[np.bool | np.integer], int] +type _ArrayLikeFloat_co = _DualArrayLike[np.dtype[np.bool | np.integer 
| np.floating], float] +type _ArrayLikeComplex_co = _DualArrayLike[np.dtype[np.bool | np.number], complex] +type _ArrayLikeNumber_co = _ArrayLikeComplex_co +type _ArrayLikeTD64_co = _DualArrayLike[np.dtype[np.bool | np.integer | np.timedelta64], int] +type _ArrayLikeDT64_co = _ArrayLike[np.datetime64] +type _ArrayLikeObject_co = _ArrayLike[np.object_] + +type _ArrayLikeVoid_co = _ArrayLike[np.void] +type _ArrayLikeBytes_co = _DualArrayLike[np.dtype[np.bytes_], bytes] +type _ArrayLikeStr_co = _DualArrayLike[np.dtype[np.str_], str] +type _ArrayLikeString_co = _DualArrayLike[StringDType, str] +type _ArrayLikeAnyString_co = _DualArrayLike[np.dtype[np.character] | StringDType, bytes | str] + +type __Float64_co = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +type __Complex128_co = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +type _ArrayLikeFloat64_co = _DualArrayLike[np.dtype[__Float64_co], float] +type _ArrayLikeComplex128_co = _DualArrayLike[np.dtype[__Complex128_co], complex] # NOTE: This includes `builtins.bool`, but not `numpy.bool`. -_ArrayLikeInt: TypeAlias = _DualArrayLike[dtype[np.integer], int] +type _ArrayLikeInt = _DualArrayLike[np.dtype[np.integer], int] diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi deleted file mode 100644 index 21df1d983fe6..000000000000 --- a/numpy/_typing/_callable.pyi +++ /dev/null @@ -1,366 +0,0 @@ -""" -A module with various ``typing.Protocol`` subclasses that implement -the ``__call__`` magic method. - -See the `Mypy documentation`_ on protocols for more details. - -.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols - -""" - -from typing import ( - Any, - NoReturn, - Protocol, - TypeAlias, - TypeVar, - final, - overload, - type_check_only, -) - -import numpy as np -from numpy import ( - complex128, - complexfloating, - float64, - floating, - generic, - int8, - int_, - integer, - number, - signedinteger, - unsignedinteger, -) - -from . import NBitBase -from ._array_like import NDArray -from ._nbit import _NBitInt -from ._nested_sequence import _NestedSequence -from ._scalars import ( - _BoolLike_co, - _IntLike_co, - _NumberLike_co, -) - -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") -_T1_contra = TypeVar("_T1_contra", contravariant=True) -_T2_contra = TypeVar("_T2_contra", contravariant=True) - -_2Tuple: TypeAlias = tuple[_T1, _T1] - -_NBit1 = TypeVar("_NBit1", bound=NBitBase) -_NBit2 = TypeVar("_NBit2", bound=NBitBase) - -_IntType = TypeVar("_IntType", bound=integer) -_FloatType = TypeVar("_FloatType", bound=floating) -_NumberType = TypeVar("_NumberType", bound=number) -_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number) -_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) - -@type_check_only -class _BoolOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolBitOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... 
- @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - -@type_check_only -class _BoolSub(Protocol): - # Note that `other: bool` is absent here - @overload - def __call__(self, other: bool, /) -> NoReturn: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolTrueDiv(Protocol): - @overload - def __call__(self, other: float | _IntLike_co, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> int8: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - @overload - def __call__(self, other: _FloatType, /) -> _FloatType: ... - -@type_check_only -class _BoolDivMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... - @overload # platform dependent - def __call__(self, other: int, /) -> _2Tuple[int_]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[np.float64]: ... - @overload - def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... - @overload - def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... - -@type_check_only -class _IntTrueDiv(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... - @overload - def __call__( - self, other: integer[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - -@type_check_only -class _UnsignedIntOp(Protocol[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - @overload - def __call__(self, other: int, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: unsignedinteger[_NBit2], /) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - @overload - def __call__(self, other: signedinteger, /) -> Any: ... - -@type_check_only -class _UnsignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger: ... - @overload - def __call__(self, other: signedinteger, /) -> signedinteger: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - -@type_check_only -class _UnsignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: int | signedinteger, /) -> Any: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... 
- @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - -@type_check_only -class _UnsignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__(self, other: int | signedinteger, /) -> _2Tuple[Any]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> _2Tuple[unsignedinteger[_NBit1]] | _2Tuple[unsignedinteger[_NBit2]]: ... - -@type_check_only -class _SignedIntOp(Protocol[_NBit1]): - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: signedinteger[_NBit2], /) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[int_]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[signedinteger[_NBit2]]: ... - -@type_check_only -class _FloatOp(Protocol[_NBit1]): - @overload - def __call__(self, other: int, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - -@type_check_only -class _FloatMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - -class _FloatDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... - @overload - def __call__( - self, other: int, / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBitInt]]: ... - @overload - def __call__( - self, other: float, / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... 
- @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBit2]]: ... - -@type_check_only -class _NumberOp(Protocol): - def __call__(self, other: _NumberLike_co, /) -> Any: ... - -@final -@type_check_only -class _SupportsLT(Protocol): - def __lt__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _SupportsLE(Protocol): - def __le__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _SupportsGT(Protocol): - def __gt__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _SupportsGE(Protocol): - def __ge__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGT, /) -> np.bool: ... - -@final -@type_check_only -class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsGE], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGE, /) -> np.bool: ... - -@final -@type_check_only -class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsLT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsLT, /) -> np.bool: ... - -@final -@type_check_only -class _ComparisonOpGE(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGT, /) -> np.bool: ... 
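For reference, the callback-protocol pattern that the deleted `_callable.pyi` implemented, reduced to a self-contained sketch; the names `_IntOrFloatOp` and `twice` are invented for illustration:

from typing import Protocol, overload

class _IntOrFloatOp(Protocol):
    # The return type is selected by the argument type; this is how the removed
    # module encoded value-dependent results of the scalar arithmetic dunders.
    @overload
    def __call__(self, other: int, /) -> int: ...
    @overload
    def __call__(self, other: float, /) -> float: ...

def twice(op: _IntOrFloatOp) -> tuple[int, float]:
    # A type checker resolves each call against the matching overload.
    return op(2), op(2.5)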
diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py
index 7b6fad228d56..518f9b473e4a 100644
--- a/numpy/_typing/_char_codes.py
+++ b/numpy/_typing/_char_codes.py
@@ -1,152 +1,104 @@
 from typing import Literal
 
-_BoolCodes = Literal[
-    "bool", "bool_",
-    "?", "|?", "=?", "<?", ">?",
-    "b1", "|b1", "=b1", "<b1", ">b1",
-]  # fmt: skip
+type _BoolCodes = Literal["bool", "bool_", "?", "b1", "|b1", "=b1", "<b1", ">b1"]
+
+type _Int8Codes = Literal["int8", "byte", "b", "i1", "|i1", "=i1", "<i1", ">i1"]
+type _Int16Codes = Literal["int16", "short", "h", "i2", "|i2", "=i2", "<i2", ">i2"]
+type _Int32Codes = Literal["int32", "i4", "|i4", "=i4", "<i4", ">i4"]
+type _Int64Codes = Literal["int64", "i8", "|i8", "=i8", "<i8", ">i8"]
+
+type _UInt8Codes = Literal["uint8", "ubyte", "B", "u1", "|u1", "=u1", "<u1", ">u1"]
+type _UInt16Codes = Literal["uint16", "ushort", "H", "u2", "|u2", "=u2", "<u2", ">u2"]
+type _UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "<u4", ">u4"]
+type _UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "<u8", ">u8"]
+
+type _IntCCodes = Literal["intc", "i", "|i", "=i", "<i", ">i"]
+type _LongCodes = Literal["long", "l", "|l", "=l", "<l", ">l"]
+type _LongLongCodes = Literal["longlong", "q", "|q", "=q", "<q", ">q"]
+type _IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "<n", ">n"]
+
+type _UIntCCodes = Literal["uintc", "I", "|I", "=I", "<I", ">I"]
+type _ULongCodes = Literal["ulong", "L", "|L", "=L", "<L", ">L"]
+type _ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "<Q", ">Q"]
+type _UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "<N", ">N"]
+
+type _Float16Codes = Literal["float16", "half", "e", "f2", "|f2", "=f2", "<f2", ">f2"]
+type _Float32Codes = Literal["float32", "single", "f", "f4", "|f4", "=f4", "<f4", ">f4"]
+type _Float64Codes = Literal[
+    "float64", "float", "double", "d", "f8", "|f8", "=f8", "<f8", ">f8"
+]
 
-_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "<u1", ">u1"]
-_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "<u2", ">u2"]
-_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "<u4", ">u4"]
-_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "<u8", ">u8"]
-
-_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "<i1", ">i1"]
-_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "<i2", ">i2"]
-_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "<i4", ">i4"]
-_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "<i8", ">i8"]
-
-_Float16Codes = Literal["float16", "f2", "|f2", "=f2", "<f2", ">f2"]
-_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "<f4", ">f4"]
-_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "<f8", ">f8"]
-
-_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "<c8", ">c8"]
-_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "<c16", ">c16"]
-
-_ByteCodes = Literal["byte", "b", "|b", "=b", "<b", ">b"]
-_ShortCodes = Literal["short", "h", "|h", "=h", "<h", ">h"]
-_IntCCodes = Literal["intc", "i", "|i", "=i", "<i", ">i"]
-_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "<n", ">n"]
-_LongCodes = Literal["long", "l", "|l", "=l", "<l", ">l"]
-_IntCodes = _IntPCodes
-_LongLongCodes = Literal["longlong", "q", "|q", "=q", "<q", ">q"]
-
-_UByteCodes = Literal["ubyte", "B", "|B", "=B", "<B", ">B"]
-_UShortCodes = Literal["ushort", "H", "|H", "=H", "<H", ">H"]
-_UIntCCodes = Literal["uintc", "I", "|I", "=I", "<I", ">I"]
-_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "<N", ">N"]
-_ULongCodes = Literal["ulong", "L", "|L", "=L", "<L", ">L"]
-_UIntCodes = _UIntPCodes
-_ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "<Q", ">Q"]
-
-_HalfCodes = Literal["half", "e", "|e", "=e", "<e", ">e"]
-_SingleCodes = Literal["single", "f", "|f", "=f", "<f", ">f"]
-_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "<d", ">d"]
-_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "<g", ">g"]
-
-_CSingleCodes = Literal["csingle", "F", "|F", "=F", "<F", ">F"]
-_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "<D", ">D"]
-_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "<G", ">G"]
-
-_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "<U", ">U"]
-_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "<S", ">S"]
-_VoidCodes = Literal["void", "V", "|V", "=V", "<V", ">V"]
-_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "<O", ">O"]
-
-_DT64Codes = Literal[
-    "datetime64", "|datetime64", "=datetime64",
-    "<datetime64", ">datetime64",
-    "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]",
-    "<datetime64[Y]", ">datetime64[Y]",
-    "datetime64[M]", "|datetime64[M]", "=datetime64[M]",
-    "<datetime64[M]", ">datetime64[M]",
-    "datetime64[W]", "|datetime64[W]", "=datetime64[W]",
-    "<datetime64[W]", ">datetime64[W]",
-    "datetime64[D]", "|datetime64[D]", "=datetime64[D]",
-    "<datetime64[D]", ">datetime64[D]",
-    "datetime64[h]", "|datetime64[h]", "=datetime64[h]",
-    "<datetime64[h]", ">datetime64[h]",
-    "datetime64[m]", "|datetime64[m]", "=datetime64[m]",
-    "<datetime64[m]", ">datetime64[m]",
-    "datetime64[s]", "|datetime64[s]", "=datetime64[s]",
-    "<datetime64[s]", ">datetime64[s]",
-    "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]",
-    "<datetime64[ms]", ">datetime64[ms]",
-    "datetime64[us]", "|datetime64[us]", "=datetime64[us]",
-    "<datetime64[us]", ">datetime64[us]",
-    "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]",
-    "<datetime64[ns]", ">datetime64[ns]",
-    "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]",
-    "<datetime64[ps]", ">datetime64[ps]",
-    "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]",
-    "<datetime64[fs]", ">datetime64[fs]",
-    "datetime64[as]", "|datetime64[as]", "=datetime64[as]",
-    "<datetime64[as]", ">datetime64[as]",
-    "M", "|M", "=M", "<M", ">M",
-    "M8", "|M8", "=M8", "<M8", ">M8",
-    "M8[Y]", "|M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
-    "M8[M]", "|M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
-    "M8[W]", "|M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
-    "M8[D]", "|M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
-    "M8[h]", "|M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
-    "M8[m]", "|M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
-    "M8[s]", "|M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
-    "M8[ms]", "|M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
-    "M8[us]", "|M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
-    "M8[ns]", "|M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
-    "M8[ps]", "|M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
-    "M8[fs]", "|M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
-    "M8[as]", "|M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
+type _LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "<g", ">g"]
+
+type _Complex64Codes = Literal[
+    "complex64", "csingle", "F", "c8", "|c8", "=c8", "<c8", ">c8"
 ]
-_TD64Codes = Literal[
-    "timedelta64", "|timedelta64", "=timedelta64",
-    "<timedelta64", ">timedelta64",
-    "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]",
-    "<timedelta64[Y]", ">timedelta64[Y]",
-    "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]",
-    "<timedelta64[M]", ">timedelta64[M]",
-    "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]",
-    "<timedelta64[W]", ">timedelta64[W]",
-    "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]",
-    "<timedelta64[D]", ">timedelta64[D]",
-    "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]",
-    "<timedelta64[h]", ">timedelta64[h]",
-    "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]",
-    "<timedelta64[m]", ">timedelta64[m]",
-    "timedelta64[s]", "|timedelta64[s]", "=timedelta64[s]",
-    "<timedelta64[s]", ">timedelta64[s]",
-    "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]",
-    "<timedelta64[ms]", ">timedelta64[ms]",
-    "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]",
-    "<timedelta64[us]", ">timedelta64[us]",
-    "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]",
-    "<timedelta64[ns]", ">timedelta64[ns]",
-    "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]",
-    "<timedelta64[ps]", ">timedelta64[ps]",
-    "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]",
-    "<timedelta64[fs]", ">timedelta64[fs]",
-    "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]",
-    "<timedelta64[as]", ">timedelta64[as]",
-    "m", "|m", "=m", "<m", ">m",
-    "m8", "|m8", "=m8", "<m8", ">m8",
-    "m8[Y]", "|m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
-    "m8[M]", "|m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
-    "m8[W]", "|m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
-    "m8[D]", "|m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
-    "m8[h]", "|m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
-    "m8[m]", "|m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
-    "m8[s]", "|m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
-    "m8[ms]", "|m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
-    "m8[us]", "|m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
-    "m8[ns]", "|m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
-    "m8[ps]", "|m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
-    "m8[fs]", "|m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
-    "m8[as]", "|m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
+
+type _Complex128Codes = Literal[
+    "complex128", "complex", "cdouble", "D", "c16", "|c16", "=c16", "<c16", ">c16"
 ]
+type _CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "<G", ">G"]
+
+type _StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "<U", ">U"]
+type _BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "<S", ">S"]
+type _VoidCodes = Literal["void", "V", "|V", "=V", "<V", ">V"]
+type _ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "<O", ">O"]
+
+# datetime64
+type _DT64Codes_any = Literal["datetime64", "M", "M8", "|M8", "=M8", "<M8", ">M8"]
+type _DT64Codes_date = Literal[
+    "datetime64[Y]", "M8[Y]", "|M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
+    "datetime64[M]", "M8[M]", "|M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
+    "datetime64[W]", "M8[W]", "|M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
+    "datetime64[D]", "M8[D]", "|M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
+]  # fmt: skip
+type _DT64Codes_datetime = Literal[
+    "datetime64[h]", "M8[h]", "|M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
+    "datetime64[m]", "M8[m]", "|M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
+    "datetime64[s]", "M8[s]", "|M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
+    "datetime64[ms]", "M8[ms]", "|M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
+    "datetime64[us]", "M8[us]", "|M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
+    "datetime64[Ξŧs]", "M8[Ξŧs]", "|M8[Ξŧs]", "=M8[Ξŧs]", "<M8[Ξŧs]", ">M8[Ξŧs]",
+]  # fmt: skip
+type _DT64Codes_int = Literal[
+    "datetime64[ns]", "M8[ns]", "|M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
+    "datetime64[ps]", "M8[ps]", "|M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
+    "datetime64[fs]", "M8[fs]", "|M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
+    "datetime64[as]", "M8[as]", "|M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
+]  # fmt: skip
+type _DT64Codes = Literal[
+    _DT64Codes_any,
+    _DT64Codes_date,
+    _DT64Codes_datetime,
+    _DT64Codes_int,
+]
+
+# timedelta64
+type _TD64Codes_any = Literal["timedelta64", "m", "m8", "|m8", "=m8", "<m8", ">m8"]
+type _TD64Codes_int = Literal[
+    "timedelta64[Y]", "m8[Y]", "|m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
+    "timedelta64[M]", "m8[M]", "|m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
+    "timedelta64[ns]", "m8[ns]", "|m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
+    "timedelta64[ps]", "m8[ps]", "|m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
+    "timedelta64[fs]", "m8[fs]", "|m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
+    "timedelta64[as]", "m8[as]", "|m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
+]  # fmt: skip
+type _TD64Codes_timedelta = Literal[
+    "timedelta64[W]", "m8[W]", "|m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
+    "timedelta64[D]", "m8[D]", "|m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
+    "timedelta64[h]", "m8[h]", "|m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
+    "timedelta64[m]", "m8[m]", "|m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
+    "timedelta64[s]", "m8[s]", "|m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
+    "timedelta64[ms]", "m8[ms]", "|m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
+    "timedelta64[us]", "m8[us]", "|m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
+    "timedelta64[Ξŧs]", "m8[Ξŧs]", "|m8[Ξŧs]", "=m8[Ξŧs]", "<m8[Ξŧs]", ">m8[Ξŧs]",
+]  # fmt: skip
+type _TD64Codes = Literal[_TD64Codes_any, _TD64Codes_int, _TD64Codes_timedelta]
+
 # NOTE: `StringDType` has no scalar type, and therefore has no name that can
 # be passed to the `dtype` constructor
-_StringCodes = Literal["T", "|T", "=T", "<T", ">T"]
+type _StringCodes = Literal["T", "|T", "=T", "<T", ">T"]
 
 # NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't
 # the case for a `Union` of `Literal`s.
@@ -154,54 +106,45 @@
 # Another advantage of nesting is that they always have a "flat"
 # `Literal.__args__`, which is a tuple of *literally* all its literal values.
 
-_UnsignedIntegerCodes = Literal[
-    _UInt8Codes,
-    _UInt16Codes,
-    _UInt32Codes,
-    _UInt64Codes,
-    _UIntCodes,
-    _UByteCodes,
-    _UShortCodes,
-    _UIntCCodes,
-    _ULongCodes,
-    _ULongLongCodes,
-]
-_SignedIntegerCodes = Literal[
+type _SignedIntegerCodes = Literal[
     _Int8Codes,
     _Int16Codes,
     _Int32Codes,
     _Int64Codes,
-    _IntCodes,
-    _ByteCodes,
-    _ShortCodes,
     _IntCCodes,
     _LongCodes,
     _LongLongCodes,
+    _IntPCodes,
+]
+type _UnsignedIntegerCodes = Literal[
+    _UInt8Codes,
+    _UInt16Codes,
+    _UInt32Codes,
+    _UInt64Codes,
+    _UIntCCodes,
+    _ULongCodes,
+    _ULongLongCodes,
+    _UIntPCodes,
 ]
-_FloatingCodes = Literal[
+type _FloatingCodes = Literal[
     _Float16Codes,
     _Float32Codes,
     _Float64Codes,
-    _HalfCodes,
-    _SingleCodes,
-    _DoubleCodes,
-    _LongDoubleCodes
+    _LongDoubleCodes,
 ]
-_ComplexFloatingCodes = Literal[
+type _ComplexFloatingCodes = Literal[
     _Complex64Codes,
     _Complex128Codes,
-    _CSingleCodes,
-    _CDoubleCodes,
     _CLongDoubleCodes,
 ]
-_IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes]
-_InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes]
-_NumberCodes = Literal[_IntegerCodes, _InexactCodes]
+type _IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes]
+type _InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes]
+type _NumberCodes = Literal[_IntegerCodes, _InexactCodes]
 
-_CharacterCodes = Literal[_StrCodes, _BytesCodes]
-_FlexibleCodes = Literal[_VoidCodes, _CharacterCodes]
+type _CharacterCodes = Literal[_BytesCodes, _StrCodes]
+type _FlexibleCodes = Literal[_CharacterCodes, _VoidCodes]
 
-_GenericCodes = Literal[
+type _GenericCodes = Literal[
     _BoolCodes,
     _NumberCodes,
     _FlexibleCodes,
diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py
index c406b3098384..09ed1a0084de 100644
--- a/numpy/_typing/_dtype_like.py
+++ b/numpy/_typing/_dtype_like.py
@@ -1,12 +1,5 @@
-from collections.abc import Sequence  # noqa: F811
-from typing import (
-    Any,
-    Protocol,
-    TypeAlias,
-    TypedDict,
-    TypeVar,
-    runtime_checkable,
-)
+from collections.abc import Sequence
+from typing import Any, NotRequired, Protocol, TypedDict, runtime_checkable
 
 import numpy as np
 
@@ -25,41 +18,43 @@
     _VoidCodes,
 )
 
-_ScalarT = TypeVar("_ScalarT", bound=np.generic)
-_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True)
-
-_DTypeLikeNested: TypeAlias = Any  # TODO: wait for support for recursive types
+type _DTypeLikeNested = Any  # TODO: wait for support for recursive types
 
-# Mandatory keys
-class _DTypeDictBase(TypedDict):
+class _DTypeDict(TypedDict):
     names: Sequence[str]
     formats: Sequence[_DTypeLikeNested]
-
-
-# Mandatory + optional keys
-class _DTypeDict(_DTypeDictBase, total=False):
     # Only `str` elements are usable as indexing aliases,
     # but `titles` can in principle accept any object
-    offsets: Sequence[int]
-    titles: Sequence[Any]
-    itemsize: int
-    aligned: bool
+    offsets: NotRequired[Sequence[int]]
+    titles: NotRequired[Sequence[Any]]
+    itemsize: NotRequired[int]
+    aligned: NotRequired[bool]
 
 # A protocol for anything with the dtype attribute
 @runtime_checkable
-class _SupportsDType(Protocol[_DTypeT_co]):
+class _HasDType[DTypeT: np.dtype](Protocol):
     @property
-    def dtype(self) -> _DTypeT_co: ...
+    def dtype(self) -> DTypeT: ...
+
+
+class _HasNumPyDType[DTypeT: np.dtype](Protocol):
+    @property
+    def __numpy_dtype__(self, /) -> DTypeT: ...
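As a concrete illustration of the `_HasDType` protocol above: `np.dtype` accepts any object that exposes a `dtype` attribute, which is exactly the shape the protocol describes. `DTypeHolder` is a made-up class, not NumPy API:

import numpy as np

class DTypeHolder:
    # Any object with a `dtype` property satisfies _HasDType structurally.
    @property
    def dtype(self) -> np.dtype:
        return np.dtype(np.float32)

print(np.dtype(DTypeHolder()))  # float32 -- the constructor reads `.dtype` directly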
+ + +type _SupportsDType[DTypeT: np.dtype] = _HasDType[DTypeT] | _HasNumPyDType[DTypeT] # A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] +type _DTypeLike[ScalarT: np.generic] = ( + type[ScalarT] | np.dtype[ScalarT] | _SupportsDType[np.dtype[ScalarT]] +) # Would create a dtype[np.void] -_VoidDTypeLike: TypeAlias = ( +type _VoidDTypeLike = ( # If a tuple, then it can be either: # - (flexible_dtype, itemsize) # - (fixed_dtype, shape) @@ -80,31 +75,29 @@ def dtype(self) -> _DTypeT_co: ... # Aliases for commonly used dtype-like objects. # Note that the precision of `np.number` subclasses is ignored herein. -_DTypeLikeBool: TypeAlias = type[bool] | _DTypeLike[np.bool] | _BoolCodes -_DTypeLikeInt: TypeAlias = ( - type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes -) -_DTypeLikeUInt: TypeAlias = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes -_DTypeLikeFloat: TypeAlias = type[float] | _DTypeLike[np.floating] | _FloatingCodes -_DTypeLikeComplex: TypeAlias = ( +type _DTypeLikeBool = type[bool] | _DTypeLike[np.bool] | _BoolCodes +type _DTypeLikeInt = type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes +type _DTypeLikeUInt = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes +type _DTypeLikeFloat = type[float] | _DTypeLike[np.floating] | _FloatingCodes +type _DTypeLikeComplex = ( type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes ) -_DTypeLikeComplex_co: TypeAlias = ( +type _DTypeLikeComplex_co = ( type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes ) -_DTypeLikeDT64: TypeAlias = _DTypeLike[np.timedelta64] | _TD64Codes -_DTypeLikeTD64: TypeAlias = _DTypeLike[np.datetime64] | _DT64Codes -_DTypeLikeBytes: TypeAlias = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes -_DTypeLikeStr: TypeAlias = type[str] | _DTypeLike[np.str_] | _StrCodes -_DTypeLikeVoid: TypeAlias = ( +type _DTypeLikeDT64 = _DTypeLike[np.timedelta64] | _TD64Codes +type _DTypeLikeTD64 = _DTypeLike[np.datetime64] | _DT64Codes +type _DTypeLikeBytes = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes +type _DTypeLikeStr = type[str] | _DTypeLike[np.str_] | _StrCodes +type _DTypeLikeVoid = ( type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes ) -_DTypeLikeObject: TypeAlias = type[object] | _DTypeLike[np.object_] | _ObjectCodes +type _DTypeLikeObject = type[object] | _DTypeLike[np.object_] | _ObjectCodes # Anything that can be coerced into numpy.dtype. # Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str | None +type DTypeLike = type | str | np.dtype | _SupportsDType[np.dtype] | _VoidDTypeLike # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. 
`{'field1': ..., 'field2': ..., ...}`), diff --git a/numpy/_typing/_nbit.py b/numpy/_typing/_nbit.py index 60bce3245c7a..1ad5f017eeb9 100644 --- a/numpy/_typing/_nbit.py +++ b/numpy/_typing/_nbit.py @@ -1,19 +1,17 @@ """A module with the precisions of platform-specific `~numpy.number`s.""" -from typing import TypeAlias - from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit # To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin -_NBitByte: TypeAlias = _8Bit -_NBitShort: TypeAlias = _16Bit -_NBitIntC: TypeAlias = _32Bit -_NBitIntP: TypeAlias = _32Bit | _64Bit -_NBitInt: TypeAlias = _NBitIntP -_NBitLong: TypeAlias = _32Bit | _64Bit -_NBitLongLong: TypeAlias = _64Bit +type _NBitByte = _8Bit +type _NBitShort = _16Bit +type _NBitIntC = _32Bit +type _NBitIntP = _32Bit | _64Bit +type _NBitInt = _NBitIntP +type _NBitLong = _32Bit | _64Bit +type _NBitLongLong = _64Bit -_NBitHalf: TypeAlias = _16Bit -_NBitSingle: TypeAlias = _32Bit -_NBitDouble: TypeAlias = _64Bit -_NBitLongDouble: TypeAlias = _64Bit | _96Bit | _128Bit +type _NBitHalf = _16Bit +type _NBitSingle = _32Bit +type _NBitDouble = _64Bit +type _NBitLongDouble = _64Bit | _96Bit | _128Bit diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py index 28d3e63c1769..28a60ecbe00f 100644 --- a/numpy/_typing/_nbit_base.py +++ b/numpy/_typing/_nbit_base.py @@ -30,14 +30,13 @@ class NBitBase: .. code-block:: python - >>> from typing import TypeVar, TYPE_CHECKING + >>> from typing import TYPE_CHECKING >>> import numpy as np >>> import numpy.typing as npt - >>> S = TypeVar("S", bound=npt.NBitBase) - >>> T = TypeVar("T", bound=npt.NBitBase) - - >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]: + >>> def add[S: npt.NBitBase, T: npt.NBitBase]( + ... a: np.floating[S], b: np.integer[T] + ... ) -> np.floating[S | T]: ... return a + b >>> a = np.float16() diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index ccf8f5ceac45..bd317c896094 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -3,20 +3,19 @@ # mypy: disable-error-code=misc from typing import final - from typing_extensions import deprecated # Deprecated in NumPy 2.3, 2025-05-01 @deprecated( "`NBitBase` is deprecated and will be removed from numpy.typing in the " - "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "future. Use `@typing.overload` or a type parameter with a scalar-type as upper " "bound, instead. (deprecated in NumPy 2.3)", ) @final class NBitBase: ... @final -class _256Bit(NBitBase): ... +class _256Bit(NBitBase): ... # type: ignore[deprecated] @final class _128Bit(_256Bit): ... diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index e3362a9f21fe..13711be397e9 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -1,17 +1,15 @@ """A module containing the `_NestedSequence` protocol.""" -from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable if TYPE_CHECKING: from collections.abc import Iterator __all__ = ["_NestedSequence"] -_T_co = TypeVar("_T_co", covariant=True) - @runtime_checkable -class _NestedSequence(Protocol[_T_co]): +class _NestedSequence[T](Protocol): """A protocol for representing nested sequences. 
    Warning
@@ -54,7 +52,7 @@ def __len__(self, /) -> int:
         """Implement ``len(self)``."""
         raise NotImplementedError
 
-    def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]":
+    def __getitem__(self, index: int, /) -> "T | _NestedSequence[T]":
         """Implement ``self[x]``."""
         raise NotImplementedError
 
@@ -62,11 +60,11 @@ def __contains__(self, x: object, /) -> bool:
         """Implement ``x in self``."""
         raise NotImplementedError
 
-    def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]":
+    def __iter__(self, /) -> "Iterator[T | _NestedSequence[T]]":
         """Implement ``iter(self)``."""
         raise NotImplementedError
 
-    def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]":
+    def __reversed__(self, /) -> "Iterator[T | _NestedSequence[T]]":
         """Implement ``reversed(self)``."""
         raise NotImplementedError
diff --git a/numpy/_typing/_scalars.py b/numpy/_typing/_scalars.py
index b0de66d89aa1..2d36c4961c42 100644
--- a/numpy/_typing/_scalars.py
+++ b/numpy/_typing/_scalars.py
@@ -1,20 +1,20 @@
-from typing import Any, TypeAlias
+from typing import Any
 
 import numpy as np
 
 # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
 # `np.bytes_` are already subclasses of their builtin counterpart
-_CharLike_co: TypeAlias = str | bytes
+type _CharLike_co = str | bytes
 
 # The `<X>Like_co` type-aliases below represent all scalars that can be
 # coerced into `<X>` (with the casting rule `same_kind`)
-_BoolLike_co: TypeAlias = bool | np.bool
-_UIntLike_co: TypeAlias = bool | np.unsignedinteger | np.bool
-_IntLike_co: TypeAlias = int | np.integer | np.bool
-_FloatLike_co: TypeAlias = float | np.floating | np.integer | np.bool
-_ComplexLike_co: TypeAlias = complex | np.number | np.bool
-_NumberLike_co: TypeAlias = _ComplexLike_co
-_TD64Like_co: TypeAlias = int | np.timedelta64 | np.integer | np.bool
+type _BoolLike_co = bool | np.bool
+type _UIntLike_co = bool | np.unsignedinteger | np.bool
+type _IntLike_co = int | np.integer | np.bool
+type _FloatLike_co = float | np.floating | np.integer | np.bool
+type _ComplexLike_co = complex | np.number | np.bool
+type _NumberLike_co = _ComplexLike_co
+type _TD64Like_co = int | np.timedelta64 | np.integer | np.bool
 
 # `_VoidLike_co` is technically not a scalar, but it's close enough
-_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void
-_ScalarLike_co: TypeAlias = complex | str | bytes | np.generic
+type _VoidLike_co = tuple[Any, ...] | np.void
+type _ScalarLike_co = complex | str | bytes | np.generic
diff --git a/numpy/_typing/_shape.py b/numpy/_typing/_shape.py
index e297aef2f554..132943b283c8 100644
--- a/numpy/_typing/_shape.py
+++ b/numpy/_typing/_shape.py
@@ -1,8 +1,8 @@
 from collections.abc import Sequence
-from typing import Any, SupportsIndex, TypeAlias
+from typing import Any, SupportsIndex
 
-_Shape: TypeAlias = tuple[int, ...]
-_AnyShape: TypeAlias = tuple[Any, ...]
+type _Shape = tuple[int, ...]
+type _AnyShape = tuple[Any, ...]
 
 # Anything that can be coerced to a shape tuple
-_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex]
+type _ShapeLike = SupportsIndex | Sequence[SupportsIndex]
diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi
index 766cde1ad420..b9dc5fd5b975 100644
--- a/numpy/_typing/_ufunc.pyi
+++ b/numpy/_typing/_ufunc.pyi
@@ -4,19 +4,19 @@
 The signatures of the ufuncs are too varied to reasonably type
 with a single class. So instead, `ufunc` has been expanded into
 four private subclasses, one for each combination of
 `~ufunc.nin` and `~ufunc.nout`.
-""" +""" # noqa: PYI021 +from _typeshed import Incomplete +from types import EllipsisType from typing import ( Any, - Generic, Literal, LiteralString, + Never, NoReturn, Protocol, SupportsIndex, - TypeAlias, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -24,31 +24,19 @@ from typing import ( import numpy as np from numpy import _CastingKind, _OrderKACF, ufunc -from numpy.typing import NDArray -from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._array_like import ArrayLike, NDArray, _ArrayLikeBool_co, _ArrayLikeInt_co from ._dtype_like import DTypeLike from ._scalars import _ScalarLike_co from ._shape import _ShapeLike -_T = TypeVar("_T") -_2Tuple: TypeAlias = tuple[_T, _T] -_3Tuple: TypeAlias = tuple[_T, _T, _T] -_4Tuple: TypeAlias = tuple[_T, _T, _T, _T] +type _2Tuple[T] = tuple[T, T] +type _3Tuple[T] = tuple[T, T, T] +type _4Tuple[T] = tuple[T, T, T, T] -_2PTuple: TypeAlias = tuple[_T, _T, *tuple[_T, ...]] -_3PTuple: TypeAlias = tuple[_T, _T, _T, *tuple[_T, ...]] -_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, *tuple[_T, ...]] - -_NTypes = TypeVar("_NTypes", bound=int, covariant=True) -_IDType = TypeVar("_IDType", covariant=True) -_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) -_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) - -_NIn = TypeVar("_NIn", bound=int, covariant=True) -_NOut = TypeVar("_NOut", bound=int, covariant=True) -_ReturnType_co = TypeVar("_ReturnType_co", covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +type _2PTuple[T] = tuple[T, T, *tuple[T, ...]] +type _3PTuple[T] = tuple[T, T, T, *tuple[T, ...]] +type _4PTuple[T] = tuple[T, T, T, T, *tuple[T, ...]] @type_check_only class _SupportsArrayUFunc(Protocol): @@ -68,6 +56,11 @@ class _UFunc3Kwargs(TypedDict, total=False): subok: bool signature: _3Tuple[str | None] | str | None +@type_check_only +class _ReduceKwargs(TypedDict, total=False): + initial: Incomplete # = + where: _ArrayLikeBool_co | None # = True + # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for # ufuncs that don't accept two input arguments and return one output argument. # In such cases the respective methods return `NoReturn` @@ -81,15 +74,15 @@ class _UFunc3Kwargs(TypedDict, total=False): # pyright: reportIncompatibleMethodOverride=false @type_check_only -class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin1_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -102,65 +95,63 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + /, + out: None = None, *, - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., - ) -> Any: ... + ) -> Incomplete: ... 
@overload def __call__( self, - __x1: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload def __call__( self, - __x1: _SupportsArrayUFunc, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: _SupportsArrayUFunc, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., - ) -> Any: ... + ) -> Incomplete: ... - def at( - self, - a: _SupportsArrayUFunc, - indices: _ArrayLikeInt_co, - /, - ) -> None: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only -class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -176,94 +167,114 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = None, + out: EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> Any: ... + ) -> Incomplete: ... @overload # (array-like, array) -> array def __call__( self, x1: ArrayLike, - x2: NDArray[np.generic], + x2: np.ndarray, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array, array-like) -> array def __call__( self, - x1: NDArray[np.generic], + x1: np.ndarray, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... 
@overload # (array-like, array-like, out=array) -> array def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: np.ndarray | tuple[np.ndarray], *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like) -> array | scalar def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any] | Any: ... + ) -> NDArray[Incomplete] | Incomplete: ... - def at( + def accumulate( self, - a: NDArray[Any], - indices: _ArrayLikeInt_co, - b: ArrayLike, + array: ArrayLike, /, - ) -> None: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + @overload # type: ignore[override] + def reduce( # out=None (default), keepdims=False (default) + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[False] = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> Incomplete: ... + @overload # out=ndarray or out=... def reduce( self, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., - keepdims: bool = ..., - initial: Any = ..., - where: _ArrayLikeBool_co = ..., - ) -> Any: ... - - def accumulate( + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: np.ndarray | EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... + @overload # keepdims=True + def reduce( self, array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., - ) -> NDArray[Any]: ... + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + *, + keepdims: Literal[True], + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... def reduceat( self, array: ArrayLike, + /, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., - ) -> NDArray[Any]: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... - @overload # (scalar, scalar) -> scalar - def outer( + @overload # type: ignore[override] + def outer( # (scalar, scalar) -> scalar self, A: _ScalarLike_co, B: _ScalarLike_co, @@ -272,29 +283,29 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i out: None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> Any: ... + ) -> Incomplete: ... @overload # (array-like, array) -> array def outer( self, A: ArrayLike, - B: NDArray[np.generic], + B: np.ndarray, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... 
@overload # (array, array-like) -> array def outer( self, - A: NDArray[np.generic], + A: np.ndarray, B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like, out=array) -> array def outer( self, @@ -302,10 +313,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: np.ndarray | tuple[np.ndarray] | EllipsisType, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like) -> array | scalar def outer( self, @@ -313,21 +324,29 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any] | Any: ... + ) -> NDArray[Incomplete] | Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... @type_check_only -class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin1_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -340,64 +359,68 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = None, + /, *, - where: _ArrayLikeBool_co | None = ..., + out: EllipsisType | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., - ) -> _2Tuple[Any]: ... + ) -> _2Tuple[Incomplete]: ... @overload def __call__( self, - __x1: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., - where: _ArrayLikeBool_co | None = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + ) -> _2Tuple[NDArray[Incomplete]]: ... 
@overload def __call__( self, - __x1: _SupportsArrayUFunc, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: _SupportsArrayUFunc, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., - where: _ArrayLikeBool_co | None = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., - ) -> _2Tuple[Any]: ... + ) -> _2Tuple[Incomplete]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only -class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin2_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -410,51 +433,54 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = None, + /, *, - where: _ArrayLikeBool_co | None = ..., + out: EllipsisType | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., - ) -> _2Tuple[Any]: ... + ) -> _2Tuple[Incomplete]: ... @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + x2: ArrayLike, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., - where: _ArrayLikeBool_co | None = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + ) -> _2Tuple[NDArray[Incomplete]]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... 
- def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only -class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] +class _GUFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT, SignatureT: LiteralString](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -462,43 +488,45 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @property def nargs(self) -> Literal[3]: ... @property - def signature(self) -> _Signature: ... + def signature(self) -> SignatureT: ... # Scalar for 1D array-likes; ndarray otherwise @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None = ..., + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = None, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> Any: ... + ) -> Incomplete: ... @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]], + x1: ArrayLike, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): @@ -537,9 +565,9 @@ class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): signature: str | _4PTuple[DTypeLike] @type_check_only -class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] +class _PyFunc_Nin1_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... 
+ def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -556,44 +584,45 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, x1: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, /, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, x1: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> Any: ... + ) -> Incomplete: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] - def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, /) -> None: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only -class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] +class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -611,221 +640,211 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, x2: ArrayLike, /, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, x1: _SupportsArrayUFunc, x2: _SupportsArrayUFunc | ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... @overload def __call__( self, x1: ArrayLike, x2: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... 
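These `_PyFunc_*` stubs appear to describe ufuncs built from Python callables (the kind `np.frompyfunc` returns), which compute in object dtype; hence the `NDArray[np.object_]` returns above. A quick runnable check:

import numpy as np

# A 2-in/1-out ufunc wrapping a plain Python function.
py_add = np.frompyfunc(lambda a, b: a + b, 2, 1)

out = py_add(np.arange(3), 10)
print(out, out.dtype)               # [10 11 12] object

# nin=2/nout=1 is the one shape for which reduce/accumulate remain defined.
print(py_add.reduce(np.arange(4)))  # 6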
- def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, b: ArrayLike, /) -> None: ... - - @overload - def reduce( + @overload # type: ignore[override] + def accumulate( self, array: ArrayLike, - axis: _ShapeLike | None, - dtype: DTypeLike, - out: _ArrayT, /, - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> _ArrayT: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + ) -> NDArray[np.object_]: ... @overload - def reduce( + def accumulate[OutT: np.ndarray]( self, + array: ArrayLike, /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + *, + out: OutT, + ) -> OutT: ... + + @overload # type: ignore[override] + def reduce[OutT: np.ndarray]( # out=array + self, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, *, - out: _ArrayT | tuple[_ArrayT], - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> _ArrayT: ... - @overload + out: OutT | tuple[OutT], + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> OutT: ... + @overload # out=... def reduce( self, + array: ArrayLike, /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[np.object_]: ... + @overload # keepdims=True + def reduce( + self, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, *, keepdims: Literal[True], - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., + **kwargs: Unpack[_ReduceKwargs], ) -> NDArray[np.object_]: ... @overload def reduce( self, - /, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> _ReturnType_co | NDArray[np.object_]: ... + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> ReturnT | NDArray[np.object_]: ... - @overload - def reduceat( + @overload # type: ignore[override] + def reduceat[OutT: np.ndarray]( self, array: ArrayLike, - indices: _ArrayLikeInt_co, - axis: SupportsIndex, - dtype: DTypeLike, - out: _ArrayT, /, - ) -> _ArrayT: ... - @overload - def reduceat( - self, - /, - array: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, *, - out: _ArrayT | tuple[_ArrayT], - ) -> _ArrayT: ... + out: OutT | tuple[OutT], + ) -> OutT: ... @overload def reduceat( self, - /, array: ArrayLike, + /, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, ) -> NDArray[np.object_]: ... @overload def reduceat( self, - /, array: _SupportsArrayUFunc, - indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., - ) -> Any: ... - - @overload - def accumulate( - self, - array: ArrayLike, - axis: SupportsIndex, - dtype: DTypeLike, - out: _ArrayT, - /, - ) -> _ArrayT: ... 
- @overload - def accumulate( - self, - array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - *, - out: _ArrayT | tuple[_ArrayT], - ) -> _ArrayT: ... - @overload - def accumulate( - self, /, - array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., - ) -> NDArray[np.object_]: ... + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + ) -> Incomplete: ... - @overload + @overload # type: ignore[override] def outer( self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def outer( self, A: ArrayLike, B: ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def outer( + def outer[OutT: np.ndarray]( self, A: ArrayLike, B: ArrayLike, - /, *, - out: _ArrayT, + /, + *, + out: OutT, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def outer( self, A: _SupportsArrayUFunc, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... @overload def outer( self, A: _ScalarLike_co, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... @type_check_only -class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] +class _PyFunc_Nin3P_Nout1[ReturnT, IdentT, NInT: int](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property - def nin(self) -> _NIn: ... + def nin(self) -> NInT: ... @property def nout(self) -> Literal[1]: ... @property @@ -841,9 +860,9 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, @@ -852,20 +871,20 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, x2: ArrayLike, x3: ArrayLike, /, *xs: ArrayLike, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ArrayT: ... + ) -> OutT: ... 
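The `reduce`/`accumulate` overloads above return `NDArray[np.object_]` because Python-function ufuncs always operate in object dtype. A small runtime illustration with `np.frompyfunc`:

```python
import numpy as np

add = np.frompyfunc(lambda a, b: a + b, 2, 1)  # nin=2, nout=1, object dtype

print(add.reduce([1, 2, 3]))                   # 6, reduced with the py-func
print(add.accumulate([1, 2, 3]))               # [1 3 6] as an object array
print(add.reduce(np.arange(6).reshape(2, 3), axis=0))  # [3 5 7], object dtype
```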
@overload def __call__( self, @@ -874,24 +893,24 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] @type_check_only -class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] +class _PyFunc_Nin1P_Nout2P[ReturnT, IdentT, NInT: int, NOutT: int](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property - def nin(self) -> _NIn: ... + def nin(self) -> NInT: ... @property - def nout(self) -> _NOut: ... + def nout(self) -> NOutT: ... @property def ntypes(self) -> Literal[1]: ... @property @@ -903,39 +922,39 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ReturnType_co]: ... + ) -> _2PTuple[ReturnT]: ... @overload def __call__( self, x1: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... + ) -> _2PTuple[ReturnT | NDArray[np.object_]]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, /, *xs: ArrayLike, - out: _2PTuple[_ArrayT], + out: _2PTuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ArrayT]: ... + ) -> _2PTuple[OutT]: ... @overload def __call__( self, x1: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: _2PTuple[NDArray[Any]] | None = ..., + out: _2PTuple[np.ndarray] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... 
# type: ignore[override] diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index f3472df9a554..7a78cabe60f3 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -1,16 +1,11 @@ -from collections.abc import Callable, Iterable -from typing import Protocol, TypeVar, overload, type_check_only - from _typeshed import IdentityFunction +from collections.abc import Callable, Iterable +from typing import Protocol, overload, type_check_only -from ._convertions import asbytes as asbytes -from ._convertions import asunicode as asunicode +from ._convertions import asbytes as asbytes, asunicode as asunicode ### -_T = TypeVar("_T") -_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule) - @type_check_only class _HasModule(Protocol): __module__: str @@ -20,11 +15,11 @@ class _HasModule(Protocol): @overload def set_module(module: None) -> IdentityFunction: ... @overload -def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ... +def set_module[ModuleT: _HasModule](module: str) -> Callable[[ModuleT], ModuleT]: ... # -def _rename_parameter( +def _rename_parameter[T]( old_names: Iterable[str], new_names: Iterable[str], dep_version: str | None = None, -) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... +) -> Callable[[Callable[..., T]], Callable[..., T]]: ... diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi index d53c3c40fcf5..dd738025b728 100644 --- a/numpy/_utils/_inspect.pyi +++ b/numpy/_utils/_inspect.pyi @@ -1,22 +1,18 @@ import types -from collections.abc import Callable, Mapping -from typing import Any, Final, TypeAlias, TypeVar, overload - from _typeshed import SupportsLenAndGetItem +from collections.abc import Callable, Mapping +from typing import Any, Final, overload from typing_extensions import TypeIs __all__ = ["formatargspec", "getargspec"] ### -_T = TypeVar("_T") -_RT = TypeVar("_RT") - -_StrSeq: TypeAlias = SupportsLenAndGetItem[str] -_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] +type _StrSeq = SupportsLenAndGetItem[str] +type _NestedSeq[T] = list[T | _NestedSeq[T]] | tuple[T | _NestedSeq[T], ...] -_JoinFunc: TypeAlias = Callable[[list[_T]], _T] -_FormatFunc: TypeAlias = Callable[[_T], str] +type _JoinFunc[T] = Callable[[list[T]], T] +type _FormatFunc[T] = Callable[[T], str] ### @@ -44,7 +40,7 @@ def joinseq(seq: _StrSeq) -> str: ... @overload def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... @overload -def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... +def strseq[VT, RT](object: _NestedSeq[VT], convert: Callable[[VT], RT], join: _JoinFunc[RT]) -> RT: ... # def formatargspec( diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 29dd4c912aa9..593960274814 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -5,22 +5,17 @@ from typing import ( ClassVar, Final, Generic, + Literal as L, NamedTuple, - TypeVar, final, type_check_only, ) -from typing import ( - Literal as L, -) - -from typing_extensions import TypeIs +from typing_extensions import TypeIs, TypeVar __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] ### -_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...]) _CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True) ### @@ -74,7 +69,12 @@ class _BaseVersion(Generic[_CmpKeyT_co]): def __le__(self, other: _BaseVersion, /) -> bool: ... 
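These rewrites replace module-level `TypeVar`s with PEP 695 type parameters and `type` aliases (Python 3.12+ syntax). Below is a self-contained sketch of both idioms, plus the `Never`-argument trick the ufunc stubs use to make unsupported methods uncallable; all names here are illustrative, not NumPy API:

```python
from typing import Never, NoReturn

type Pair[T] = tuple[T, T]           # generic `type` alias (PEP 695)

class Box[T]:                        # class-scoped type parameter
    def __init__(self, item: T) -> None:
        self.item = item

    def pair(self) -> Pair[T]:
        return (self.item, self.item)

    def reduce(self, array: Never, /) -> NoReturn:
        # ``Never`` has no inhabitants, so type checkers reject every call,
        # mirroring how the stubs above disable unsupported ufunc methods.
        raise TypeError("reduce is not supported")

print(Box(3).pair())                 # (3, 3)
```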
def __ge__(self, other: _BaseVersion, /) -> bool: ...
     def __gt__(self, other: _BaseVersion, /) -> bool: ...
-    def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ...
+    def _compare[CmpKeyT: tuple[object, ...]](
+        self,
+        /,
+        other: _BaseVersion[CmpKeyT],
+        method: Callable[[_CmpKeyT_co, CmpKeyT], bool],
+    ) -> bool: ...

 class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]):
     _version: Final[str]
diff --git a/numpy/conftest.py b/numpy/conftest.py
index fde4defc926d..c3c96ef3bc39 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -2,19 +2,17 @@
 Pytest configuration and fixtures for the Numpy test suite.
 """
 import os
-import string
 import sys
 import tempfile
 import warnings
 from contextlib import contextmanager
+from pathlib import Path

 import hypothesis
 import pytest

 import numpy
-import numpy as np
 from numpy._core._multiarray_tests import get_fpu_mode
-from numpy._core.tests._natype import get_stringdtype_dtype, pd_NA
 from numpy.testing._private.utils import NOGIL_BUILD

 try:
@@ -23,6 +21,11 @@
 except ModuleNotFoundError:
     HAVE_SCPDT = False

+try:
+    import pytest_run_parallel  # noqa: F401
+    PARALLEL_RUN_AVAILABLE = True
+except ModuleNotFoundError:
+    PARALLEL_RUN_AVAILABLE = False

 _old_fpu_mode = None
 _collect_results = {}
@@ -65,6 +68,17 @@ def pytest_configure(config):
                             "slow: Tests that are very slow.")
     config.addinivalue_line("markers",
                             "slow_pypy: Tests that are very slow on pypy.")
+    if not PARALLEL_RUN_AVAILABLE:
+        config.addinivalue_line("markers",
+            "parallel_threads(n): run the given test function in parallel "
+            "using `n` threads.",
+        )
+        config.addinivalue_line("markers",
+            "iterations(n): run the given test function `n` times in each thread",
+        )
+        config.addinivalue_line("markers",
+            "thread_unsafe: mark the test function as single-threaded",
+        )


 def pytest_addoption(parser):
@@ -103,7 +117,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
         pytest.exit("GIL re-enabled during tests", returncode=1)

 # FIXME when yield tests are gone.
-@pytest.hookimpl()
+@pytest.hookimpl(tryfirst=True)
 def pytest_itemcollected(item):
     """
     Check FPU precision mode was not changed during test collection.
@@ -122,6 +136,11 @@ def pytest_itemcollected(item):
         _collect_results[item] = (_old_fpu_mode, mode)
         _old_fpu_mode = mode

+    # mark f2py tests as thread unsafe
+    if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests':
+        item.add_marker(pytest.mark.thread_unsafe(
+            reason="f2py tests are thread-unsafe"))
+

 @pytest.fixture(scope="function", autouse=True)
 def check_fpu_mode(request):
@@ -147,10 +166,6 @@ def check_fpu_mode(request):
 def add_np(doctest_namespace):
     doctest_namespace['np'] = numpy

-@pytest.fixture(autouse=True)
-def env_setup(monkeypatch):
-    monkeypatch.setenv('PYTHONHASHSEED', '0')
-

 if HAVE_SCPDT:
@@ -170,7 +185,8 @@ def warnings_errors_and_rng(test=None):
         "This function is deprecated.",  # random_integers
         "Data type alias 'a'",  # numpy.rec.fromfile
         "Arrays of 2-dimensional vectors",  # matlib.cross
-        "`in1d` is deprecated", ]
+        "NumPy warning suppression and assertion utilities are deprecated."
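The markers registered in `pytest_configure` above are only no-op fallbacks for when `pytest-run-parallel` is missing; with the plugin installed, the same markers actually control threaded execution. A hedged usage sketch (the test names are hypothetical):

```python
import pytest

@pytest.mark.parallel_threads(4)   # run this test in 4 threads at once
@pytest.mark.iterations(10)        # repeat 10 times in each thread
def test_pure_function():
    assert sum(range(5)) == 10

@pytest.mark.thread_unsafe         # force single-threaded, like the f2py tests
def test_shared_state():
    assert True
```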
+ ] msg = "|".join(msgs) msgs_r = [ @@ -230,29 +246,3 @@ def warnings_errors_and_rng(test=None): 'numpy/random/_examples', 'numpy/f2py/_backends/_distutils.py', ] - - -@pytest.fixture -def random_string_list(): - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = np.random.choice(chars, size=100 * 10, replace=True) - return ret.view("U100") - - -@pytest.fixture(params=[True, False]) -def coerce(request): - return request.param - - -@pytest.fixture( - params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], - ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], -) -def na_object(request): - return request.param - - -@pytest.fixture() -def dtype(na_object, coerce): - return get_stringdtype_dtype(na_object, coerce) diff --git a/numpy/ctypeslib/__init__.pyi b/numpy/ctypeslib/__init__.pyi index adc51da2696c..f088d0281d33 100644 --- a/numpy/ctypeslib/__init__.pyi +++ b/numpy/ctypeslib/__init__.pyi @@ -3,31 +3,13 @@ from ctypes import c_int64 as _c_intp from ._ctypeslib import ( __all__ as __all__, -) -from ._ctypeslib import ( __doc__ as __doc__, -) -from ._ctypeslib import ( _concrete_ndptr as _concrete_ndptr, -) -from ._ctypeslib import ( _ndptr as _ndptr, -) -from ._ctypeslib import ( as_array as as_array, -) -from ._ctypeslib import ( as_ctypes as as_ctypes, -) -from ._ctypeslib import ( as_ctypes_type as as_ctypes_type, -) -from ._ctypeslib import ( c_intp as c_intp, -) -from ._ctypeslib import ( load_library as load_library, -) -from ._ctypeslib import ( ndpointer as ndpointer, ) diff --git a/numpy/ctypeslib/_ctypeslib.py b/numpy/ctypeslib/_ctypeslib.py index 9255603cd5d0..a18e11810418 100644 --- a/numpy/ctypeslib/_ctypeslib.py +++ b/numpy/ctypeslib/_ctypeslib.py @@ -502,7 +502,7 @@ def as_ctypes_type(dtype): -------- Converting a simple dtype: - >>> dt = np.dtype('int8') + >>> dt = np.dtype(np.int8) >>> ctype = np.ctypeslib.as_ctypes_type(dt) >>> ctype diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index e26d6052eaae..2e88d7d9464f 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -1,40 +1,9 @@ -# NOTE: Numpy's mypy plugin is used for importing the correct -# platform-specific `ctypes._SimpleCData[int]` sub-type -import ctypes -from collections.abc import Iterable, Sequence -from ctypes import c_int64 as _c_intp -from typing import ( - Any, - ClassVar, - Generic, - TypeAlias, - TypeVar, - overload, -) -from typing import Literal as L - +import ctypes as ct from _typeshed import StrOrBytesPath +from collections.abc import Iterable, Sequence +from typing import Any, ClassVar, Literal as L, overload import numpy as np -from numpy import ( - byte, - double, - dtype, - generic, - intc, - long, - longdouble, - longlong, - ndarray, - short, - single, - ubyte, - uintc, - ulong, - ulonglong, - ushort, - void, -) from numpy._core._internal import _ctypes from numpy._core.multiarray import flagsobj from numpy._typing import ( @@ -43,203 +12,211 @@ from numpy._typing import ( _AnyShape, _ArrayLike, _BoolCodes, - _ByteCodes, - _DoubleCodes, _DTypeLike, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, _IntCCodes, + _IntPCodes, _LongCodes, _LongDoubleCodes, _LongLongCodes, _ShapeLike, - _ShortCodes, - _SingleCodes, - _UByteCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, _UIntCCodes, + _UIntPCodes, _ULongCodes, _ULongLongCodes, - _UShortCodes, _VoidDTypeLike, ) __all__ = ["load_library", "ndpointer", 
"c_intp", "as_ctypes", "as_array", "as_ctypes_type"] -# TODO: Add a proper `_Shape` bound once we've got variadic typevars -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None) -_ScalarT = TypeVar("_ScalarT", bound=generic) - -_FlagsKind: TypeAlias = L[ - 'C_CONTIGUOUS', 'CONTIGUOUS', 'C', - 'F_CONTIGUOUS', 'FORTRAN', 'F', - 'ALIGNED', 'A', - 'WRITEABLE', 'W', - 'OWNDATA', 'O', - 'WRITEBACKIFCOPY', 'X', +type _FlagsKind = L[ + "C_CONTIGUOUS", "CONTIGUOUS", "C", + "F_CONTIGUOUS", "FORTRAN", "F", + "ALIGNED", "A", + "WRITEABLE", "W", + "OWNDATA", "O", + "WRITEBACKIFCOPY", "X", ] -# TODO: Add a shape typevar once we have variadic typevars (PEP 646) -class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]): +# TODO: Add a shape type parameter +class _ndptr[OptionalDTypeT: np.dtype | None](ct.c_void_p): # In practice these 4 classvars are defined in the dynamic class # returned by `ndpointer` - _dtype_: ClassVar[_DTypeOptionalT] - _shape_: ClassVar[None] - _ndim_: ClassVar[int | None] - _flags_: ClassVar[list[_FlagsKind] | None] + _dtype_: OptionalDTypeT = ... + _shape_: ClassVar[_AnyShape | None] = ... + _ndim_: ClassVar[int | None] = ... + _flags_: ClassVar[list[_FlagsKind] | None] = ... - @overload + @overload # type: ignore[override] @classmethod - def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... + def from_param(cls: type[_ndptr[None]], obj: np.ndarray) -> _ctypes[Any]: ... @overload @classmethod - def from_param(cls: type[_ndptr[_DTypeT]], obj: ndarray[Any, _DTypeT]) -> _ctypes[Any]: ... + def from_param[DTypeT: np.dtype](cls: type[_ndptr[DTypeT]], obj: np.ndarray[Any, DTypeT]) -> _ctypes[Any]: ... # pyright: ignore[reportIncompatibleMethodOverride] + +class _concrete_ndptr[DTypeT: np.dtype](_ndptr[DTypeT]): + _dtype_: DTypeT = ... + _shape_: ClassVar[_AnyShape] = ... # pyright: ignore[reportIncompatibleVariableOverride] -class _concrete_ndptr(_ndptr[_DTypeT]): - _dtype_: ClassVar[_DTypeT] - _shape_: ClassVar[_AnyShape] @property - def contents(self) -> ndarray[_AnyShape, _DTypeT]: ... + def contents(self) -> np.ndarray[_AnyShape, DTypeT]: ... -def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... +def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ct.CDLL: ... -c_intp = _c_intp +c_intp = ct.c_int64 # most platforms are 64-bit nowadays +# @overload def ndpointer( - dtype: None = ..., - ndim: int = ..., - shape: _ShapeLike | None = ..., - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + dtype: None = None, + ndim: int | None = None, + shape: _ShapeLike | None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[None]]: ... @overload -def ndpointer( - dtype: _DTypeLike[_ScalarT], - ndim: int = ..., +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], + ndim: int | None = None, *, shape: _ShapeLike, - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., -) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ... + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_concrete_ndptr[np.dtype[ScalarT]]]: ... @overload def ndpointer( - dtype: DTypeLike, - ndim: int = ..., + dtype: DTypeLike | None, + ndim: int | None = None, *, shape: _ShapeLike, - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., -) -> type[_concrete_ndptr[dtype]]: ... 
+ flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_concrete_ndptr[np.dtype]]: ... @overload -def ndpointer( - dtype: _DTypeLike[_ScalarT], - ndim: int = ..., - shape: None = ..., - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., -) -> type[_ndptr[dtype[_ScalarT]]]: ... +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_ndptr[np.dtype[ScalarT]]]: ... @overload def ndpointer( - dtype: DTypeLike, - ndim: int = ..., - shape: None = ..., - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., -) -> type[_ndptr[dtype]]: ... + dtype: DTypeLike | None, + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_ndptr[np.dtype]]: ... -@overload -def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... -@overload -def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ... -@overload -def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ... -@overload -def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ... -@overload -def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ... -@overload -def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ... -@overload -def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ... -@overload -def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ... -@overload -def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ... -@overload -def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ... -@overload -def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ... -@overload -def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ... -@overload -def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ... -@overload -def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ... -@overload -def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ... -@overload -def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure` -@overload +# +@overload # bool +def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ct.c_bool]) -> type[ct.c_bool]: ... +@overload # int8 +def as_ctypes_type(dtype: _Int8Codes | _DTypeLike[np.int8] | type[ct.c_int8]) -> type[ct.c_int8]: ... +@overload # int16 +def as_ctypes_type(dtype: _Int16Codes | _DTypeLike[np.int16] | type[ct.c_int16]) -> type[ct.c_int16]: ... +@overload # int32 +def as_ctypes_type(dtype: _Int32Codes | _DTypeLike[np.int32] | type[ct.c_int32]) -> type[ct.c_int32]: ... +@overload # int64 +def as_ctypes_type(dtype: _Int64Codes | _DTypeLike[np.int64] | type[ct.c_int64]) -> type[ct.c_int64]: ... 
+@overload # intc +def as_ctypes_type(dtype: _IntCCodes | type[ct.c_int]) -> type[ct.c_int]: ... +@overload # long +def as_ctypes_type(dtype: _LongCodes | type[ct.c_long]) -> type[ct.c_long]: ... +@overload # longlong +def as_ctypes_type(dtype: _LongLongCodes | type[ct.c_longlong]) -> type[ct.c_longlong]: ... +@overload # intp +def as_ctypes_type(dtype: _IntPCodes | type[ct.c_ssize_t] | type[int]) -> type[ct.c_ssize_t]: ... +@overload # uint8 +def as_ctypes_type(dtype: _UInt8Codes | _DTypeLike[np.uint8] | type[ct.c_uint8]) -> type[ct.c_uint8]: ... +@overload # uint16 +def as_ctypes_type(dtype: _UInt16Codes | _DTypeLike[np.uint16] | type[ct.c_uint16]) -> type[ct.c_uint16]: ... +@overload # uint32 +def as_ctypes_type(dtype: _UInt32Codes | _DTypeLike[np.uint32] | type[ct.c_uint32]) -> type[ct.c_uint32]: ... +@overload # uint64 +def as_ctypes_type(dtype: _UInt64Codes | _DTypeLike[np.uint64] | type[ct.c_uint64]) -> type[ct.c_uint64]: ... +@overload # uintc +def as_ctypes_type(dtype: _UIntCCodes | type[ct.c_uint]) -> type[ct.c_uint]: ... +@overload # ulong +def as_ctypes_type(dtype: _ULongCodes | type[ct.c_ulong]) -> type[ct.c_ulong]: ... +@overload # ulonglong +def as_ctypes_type(dtype: _ULongLongCodes | type[ct.c_ulonglong]) -> type[ct.c_ulonglong]: ... +@overload # uintp +def as_ctypes_type(dtype: _UIntPCodes | type[ct.c_size_t]) -> type[ct.c_size_t]: ... +@overload # float32 +def as_ctypes_type(dtype: _Float32Codes | _DTypeLike[np.float32] | type[ct.c_float]) -> type[ct.c_float]: ... +@overload # float64 +def as_ctypes_type(dtype: _Float64Codes | _DTypeLike[np.float64] | type[float | ct.c_double]) -> type[ct.c_double]: ... +@overload # longdouble +def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[np.longdouble] | type[ct.c_longdouble]) -> type[ct.c_longdouble]: ... +@overload # void +def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ct.Union` or `ct.Structure` +@overload # fallback def as_ctypes_type(dtype: str) -> type[Any]: ... +# @overload -def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... +def as_array(obj: ct._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... @overload -def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ... +def as_array[ScalarT: np.generic](obj: _ArrayLike[ScalarT], shape: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload -def as_array(obj: object, shape: _ShapeLike | None = ...) -> NDArray[Any]: ... +def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... -@overload -def as_ctypes(obj: byte) -> ctypes.c_byte: ... -@overload -def as_ctypes(obj: short) -> ctypes.c_short: ... -@overload -def as_ctypes(obj: intc) -> ctypes.c_int: ... -@overload -def as_ctypes(obj: long) -> ctypes.c_long: ... +def as_ctypes(obj: np.bool) -> ct.c_bool: ... @overload -def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... +def as_ctypes(obj: np.int8) -> ct.c_int8: ... @overload -def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... +def as_ctypes(obj: np.int16) -> ct.c_int16: ... @overload -def as_ctypes(obj: ushort) -> ctypes.c_ushort: ... +def as_ctypes(obj: np.int32) -> ct.c_int32: ... @overload -def as_ctypes(obj: uintc) -> ctypes.c_uint: ... +def as_ctypes(obj: np.int64) -> ct.c_int64: ... @overload -def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... +def as_ctypes(obj: np.uint8) -> ct.c_uint8: ... @overload -def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... 
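A quick runtime illustration of the `as_ctypes_type`/`as_ctypes`/`as_array` mappings enumerated in these overloads:

```python
import numpy as np

print(np.ctypeslib.as_ctypes_type(np.dtype("float64")))  # <class 'ctypes.c_double'>

arr = np.array([1.5, 2.5])
c_arr = np.ctypeslib.as_ctypes(arr)      # ctypes array sharing arr's memory
print(type(c_arr).__name__, c_arr[0])    # c_double_Array_2 1.5

back = np.ctypeslib.as_array(c_arr)      # round-trip back to an ndarray
print(back)                              # [1.5 2.5]
```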
+def as_ctypes(obj: np.uint16) -> ct.c_uint16: ... @overload -def as_ctypes(obj: single) -> ctypes.c_float: ... +def as_ctypes(obj: np.uint32) -> ct.c_uint32: ... @overload -def as_ctypes(obj: double) -> ctypes.c_double: ... +def as_ctypes(obj: np.uint64) -> ct.c_uint64: ... @overload -def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ... +def as_ctypes(obj: np.float32) -> ct.c_float: ... @overload -def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure` +def as_ctypes(obj: np.float64) -> ct.c_double: ... @overload -def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ... +def as_ctypes(obj: np.longdouble) -> ct.c_longdouble: ... @overload -def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ... +def as_ctypes(obj: np.void) -> Any: ... # `ct.Union` or `ct.Structure` @overload -def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ... +def as_ctypes(obj: NDArray[np.bool]) -> ct.Array[ct.c_bool]: ... @overload -def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... +def as_ctypes(obj: NDArray[np.int8]) -> ct.Array[ct.c_int8]: ... @overload -def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... +def as_ctypes(obj: NDArray[np.int16]) -> ct.Array[ct.c_int16]: ... @overload -def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... +def as_ctypes(obj: NDArray[np.int32]) -> ct.Array[ct.c_int32]: ... @overload -def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... +def as_ctypes(obj: NDArray[np.int64]) -> ct.Array[ct.c_int64]: ... @overload -def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ... +def as_ctypes(obj: NDArray[np.uint8]) -> ct.Array[ct.c_uint8]: ... @overload -def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... +def as_ctypes(obj: NDArray[np.uint16]) -> ct.Array[ct.c_uint16]: ... @overload -def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... +def as_ctypes(obj: NDArray[np.uint32]) -> ct.Array[ct.c_uint32]: ... @overload -def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... +def as_ctypes(obj: NDArray[np.uint64]) -> ct.Array[ct.c_uint64]: ... @overload -def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... +def as_ctypes(obj: NDArray[np.float32]) -> ct.Array[ct.c_float]: ... @overload -def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ... +def as_ctypes(obj: NDArray[np.float64]) -> ct.Array[ct.c_double]: ... @overload -def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ... +def as_ctypes(obj: NDArray[np.longdouble]) -> ct.Array[ct.c_longdouble]: ... @overload -def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... # `ctypes.Union` or `ctypes.Structure` +def as_ctypes(obj: NDArray[np.void]) -> ct.Array[Any]: ... # `ct.Union` or `ct.Structure` diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py deleted file mode 100644 index f74ed4d3f6db..000000000000 --- a/numpy/distutils/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -An enhanced distutils, providing support for Fortran compilers, for BLAS, -LAPACK and other common libraries for numerical computing, and more. - -Public submodules are:: - - misc_util - system_info - cpu_info - log - exec_command - -For details, please see the *Packaging* and *NumPy Distutils User Guide* -sections of the NumPy Reference Guide. 
- -For configuring the preference for and location of libraries like BLAS and -LAPACK, and for setting include paths and similar build options, please see -``site.cfg.example`` in the root of the NumPy repository or sdist. - -""" - -import warnings - -# Must import local ccompiler ASAP in order to get -# customized CCompiler.spawn effective. -from . import ccompiler -from . import unixccompiler - -from .npy_pkg_config import * - -warnings.warn("\n\n" - " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n" - " of the deprecation of `distutils` itself. It will be removed for\n" - " Python >= 3.12. For older Python versions it will remain present.\n" - " It is recommended to use `setuptools < 60.0` for those Python versions.\n" - " For more details, see:\n" - " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n", - DeprecationWarning, stacklevel=2 -) -del warnings - -# If numpy is installed, add distutils.test() -try: - from . import __config__ - # Normally numpy is installed if the above import works, but an interrupted - # in-place build could also have left a __config__.py. In that case the - # next import may still fail, so keep it inside the try block. - from numpy._pytesttester import PytestTester - test = PytestTester(__name__) - del PytestTester -except ImportError: - pass - - -def customized_fcompiler(plat=None, compiler=None): - from numpy.distutils.fcompiler import new_fcompiler - c = new_fcompiler(plat=plat, compiler=compiler) - c.customize() - return c - -def customized_ccompiler(plat=None, compiler=None, verbose=1): - c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) - c.customize('') - return c diff --git a/numpy/distutils/__init__.pyi b/numpy/distutils/__init__.pyi deleted file mode 100644 index 3938d68de14c..000000000000 --- a/numpy/distutils/__init__.pyi +++ /dev/null @@ -1,4 +0,0 @@ -from typing import Any - -# TODO: remove when the full numpy namespace is defined -def __getattr__(name: str) -> Any: ... diff --git a/numpy/distutils/_shell_utils.py b/numpy/distutils/_shell_utils.py deleted file mode 100644 index 9a1c8ce718c9..000000000000 --- a/numpy/distutils/_shell_utils.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Helper functions for interacting with the shell, and consuming shell-style -parameters provided in config files. -""" -import os -import shlex -import subprocess - -__all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] - - -class CommandLineParser: - """ - An object that knows how to split and join command-line arguments. - - It must be true that ``argv == split(join(argv))`` for all ``argv``. - The reverse neednt be true - `join(split(cmd))` may result in the addition - or removal of unnecessary escaping. - """ - @staticmethod - def join(argv): - """ Join a list of arguments into a command line string """ - raise NotImplementedError - - @staticmethod - def split(cmd): - """ Split a command line string into a list of arguments """ - raise NotImplementedError - - -class WindowsParser: - """ - The parsing behavior used by `subprocess.call("string")` on Windows, which - matches the Microsoft C/C++ runtime. - - Note that this is _not_ the behavior of cmd. 
- """ - @staticmethod - def join(argv): - # note that list2cmdline is specific to the windows syntax - return subprocess.list2cmdline(argv) - - @staticmethod - def split(cmd): - import ctypes # guarded import for systems without ctypes - try: - ctypes.windll - except AttributeError: - raise NotImplementedError - - # Windows has special parsing rules for the executable (no quotes), - # that we do not care about - insert a dummy element - if not cmd: - return [] - cmd = 'dummy ' + cmd - - CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW - CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) - CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) - - nargs = ctypes.c_int() - lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) - args = [lpargs[i] for i in range(nargs.value)] - assert not ctypes.windll.kernel32.LocalFree(lpargs) - - # strip the element we inserted - assert args[0] == "dummy" - return args[1:] - - -class PosixParser: - """ - The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. - """ - @staticmethod - def join(argv): - return ' '.join(shlex.quote(arg) for arg in argv) - - @staticmethod - def split(cmd): - return shlex.split(cmd, posix=True) - - -if os.name == 'nt': - NativeParser = WindowsParser -elif os.name == 'posix': - NativeParser = PosixParser diff --git a/numpy/distutils/armccompiler.py b/numpy/distutils/armccompiler.py deleted file mode 100644 index afba7eb3b352..000000000000 --- a/numpy/distutils/armccompiler.py +++ /dev/null @@ -1,26 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class ArmCCompiler(UnixCCompiler): - - """ - Arm compiler. - """ - - compiler_type = 'arm' - cc_exe = 'armclang' - cxx_exe = 'armclang++' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler + - ' -O3 -fPIC', - compiler_so=cc_compiler + - ' -O3 -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -fPIC', - linker_exe=cc_compiler + - ' -lamath', - linker_so=cc_compiler + - ' -lamath -shared') diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py deleted file mode 100644 index dee13b1c9e84..000000000000 --- a/numpy/distutils/ccompiler.py +++ /dev/null @@ -1,826 +0,0 @@ -import os -import re -import sys -import platform -import shlex -import time -import subprocess -from copy import copy -from pathlib import Path -from distutils import ccompiler -from distutils.ccompiler import ( - compiler_class, gen_lib_options, get_default_compiler, new_compiler, - CCompiler -) -from distutils.errors import ( - DistutilsExecError, DistutilsModuleError, DistutilsPlatformError, - CompileError, UnknownFileError -) -from distutils.sysconfig import customize_compiler -from distutils.version import LooseVersion - -from numpy.distutils import log -from numpy.distutils.exec_command import ( - filepath_from_subprocess_output, forward_bytes_to_stdout -) -from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ - get_num_build_jobs, \ - _commandline_dep_string, \ - sanitize_cxx_flags - -# globals for parallel build management -import threading - -_job_semaphore = None -_global_lock = threading.Lock() -_processing_files = set() - - -def _needs_build(obj, cc_args, extra_postargs, pp_opts): - """ - Check if an objects needs to be rebuild based on its dependencies - - Parameters - ---------- - obj : str - object file - - Returns - ------- - bool - """ - 
# defined in unixcompiler.py - dep_file = obj + '.d' - if not os.path.exists(dep_file): - return True - - # dep_file is a makefile containing 'object: dependencies' - # formatted like posix shell (spaces escaped, \ line continuations) - # the last line contains the compiler commandline arguments as some - # projects may compile an extension multiple times with different - # arguments - with open(dep_file) as f: - lines = f.readlines() - - cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) - last_cmdline = lines[-1] - if last_cmdline != cmdline: - return True - - contents = ''.join(lines[:-1]) - deps = [x for x in shlex.split(contents, posix=True) - if x != "\n" and not x.endswith(":")] - - try: - t_obj = os.stat(obj).st_mtime - - # check if any of the dependencies is newer than the object - # the dependencies includes the source used to create the object - for f in deps: - if os.stat(f).st_mtime > t_obj: - return True - except OSError: - # no object counts as newer (shouldn't happen if dep_file exists) - return True - - return False - - -def replace_method(klass, method_name, func): - # Py3k does not have unbound method anymore, MethodType does not work - m = lambda self, *args, **kw: func(self, *args, **kw) - setattr(klass, method_name, m) - - -###################################################################### -## Method that subclasses may redefine. But don't call this method, -## it i private to CCompiler class and may return unexpected -## results if used elsewhere. So, you have been warned.. - -def CCompiler_find_executables(self): - """ - Does nothing here, but is called by the get_version method and can be - overridden by subclasses. In particular it is redefined in the `FCompiler` - class where more documentation can be found. - - """ - pass - - -replace_method(CCompiler, 'find_executables', CCompiler_find_executables) - - -# Using customized CCompiler.spawn. -def CCompiler_spawn(self, cmd, display=None, env=None): - """ - Execute a command in a sub-process. - - Parameters - ---------- - cmd : str - The command to execute. - display : str or sequence of str, optional - The text to add to the log file kept by `numpy.distutils`. - If not given, `display` is equal to `cmd`. - env : a dictionary for environment variables, optional - - Returns - ------- - None - - Raises - ------ - DistutilsExecError - If the command failed, i.e. the exit status was not 0. - - """ - env = env if env is not None else dict(os.environ) - if display is None: - display = cmd - if is_sequence(display): - display = ' '.join(list(display)) - log.info(display) - try: - if self.verbose: - subprocess.check_output(cmd, env=env) - else: - subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - # OSError doesn't have the same hooks for the exception - # output, but exec_command() historically would use an - # empty string for EnvironmentError (base class for - # OSError) - # o = b'' - # still that would make the end-user lost in translation! 
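A condensed sketch of the `.d`-dependency-file check that `_needs_build` (shown above) performs, leaving out the command-line comparison for brevity:

```python
import os
import shlex

def needs_rebuild(obj_path):
    """Rebuild when the '.d' file is missing or any dependency is newer."""
    dep_file = obj_path + ".d"            # written by gcc-style '-MMD -MF'
    if not os.path.exists(dep_file):
        return True
    with open(dep_file) as f:
        contents = f.read()
    deps = [tok for tok in shlex.split(contents, posix=True)
            if tok != "\n" and not tok.endswith(":")]
    try:
        t_obj = os.stat(obj_path).st_mtime
        return any(os.stat(dep).st_mtime > t_obj for dep in deps)
    except OSError:
        return True                       # a missing file counts as newer
```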
- o = f"\n\n{e}\n\n\n" - try: - o = o.encode(sys.stdout.encoding) - except AttributeError: - o = o.encode('utf8') - # status previously used by exec_command() for parent - # of OSError - s = 127 - else: - # use a convenience return here so that any kind of - # caught exception will execute the default code after the - # try / except block, which handles various exceptions - return None - - if is_sequence(cmd): - cmd = ' '.join(list(cmd)) - - if self.verbose: - forward_bytes_to_stdout(o) - - if re.search(b'Too many open files', o): - msg = '\nTry rerunning setup command until build succeeds.' - else: - msg = '' - raise DistutilsExecError('Command "%s" failed with exit status %d%s' % - (cmd, s, msg)) - -replace_method(CCompiler, 'spawn', CCompiler_spawn) - -def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """ - Return the name of the object files for the given source files. - - Parameters - ---------- - source_filenames : list of str - The list of paths to source files. Paths can be either relative or - absolute, this is handled transparently. - strip_dir : bool, optional - Whether to strip the directory from the returned paths. If True, - the file name prepended by `output_dir` is returned. Default is False. - output_dir : str, optional - If given, this path is prepended to the returned paths to the - object files. - - Returns - ------- - obj_names : list of str - The list of paths to the object files corresponding to the source - files in `source_filenames`. - - """ - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(os.path.normpath(src_name)) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if base.startswith('..'): - # Resolve starting relative path components, middle ones - # (if any) have been handled by os.path.normpath above. - i = base.rfind('..')+2 - d = base[:i] - d = os.path.basename(os.path.abspath(d)) - base = d + base[i:] - if ext not in self.src_extensions: - raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_name = os.path.join(output_dir, base + self.obj_extension) - obj_names.append(obj_name) - return obj_names - -replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) - -def CCompiler_compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """ - Compile one or more source files. - - Please refer to the Python distutils API reference for more details. - - Parameters - ---------- - sources : list of str - A list of filenames - output_dir : str, optional - Path to the output directory. - macros : list of tuples - A list of macro definitions. - include_dirs : list of str, optional - The directories to add to the default include file search path for - this compilation only. - debug : bool, optional - Whether or not to output debug symbols in or alongside the object - file(s). - extra_preargs, extra_postargs : ? - Extra pre- and post-arguments. - depends : list of str, optional - A list of file names that all targets depend on. - - Returns - ------- - objects : list of str - A list of object file names, one per source file `sources`. - - Raises - ------ - CompileError - If compilation fails. 
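The compile hook whose body follows parallelizes per-object builds with a jobs semaphore plus a thread pool. The core pattern, reduced to a runnable sketch (the sleep stands in for the real `self._compile` call):

```python
import threading
import time
from concurrent.futures import ThreadPoolExecutor

jobs = 4
job_semaphore = threading.Semaphore(jobs)   # caps concurrent compile slots

def single_compile(item):
    obj, src = item
    with job_semaphore:                     # never exceed `jobs` compiles
        time.sleep(0.01)                    # stand-in for self._compile(...)
    return obj

build_items = [(f"mod{i}.o", f"mod{i}.c") for i in range(10)]
with ThreadPoolExecutor(jobs) as pool:
    list(pool.map(single_compile, build_items))  # consuming raises any errors
```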
- - """ - global _job_semaphore - - jobs = get_num_build_jobs() - - # setup semaphore to not exceed number of compile jobs when parallelized at - # extension level (python >= 3.5) - with _global_lock: - if _job_semaphore is None: - _job_semaphore = threading.Semaphore(jobs) - - if not sources: - return [] - from numpy.distutils.fcompiler import (FCompiler, - FORTRAN_COMMON_FIXED_EXTENSIONS, - has_f90_header) - if isinstance(self, FCompiler): - display = [] - for fc in ['f77', 'f90', 'fix']: - fcomp = getattr(self, 'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - def single_compile(args): - obj, (src, ext) = args - if not _needs_build(obj, cc_args, extra_postargs, pp_opts): - return - - # check if we are currently already processing the same object - # happens when using the same source in multiple extensions - while True: - # need explicit lock as there is no atomic check and add with GIL - with _global_lock: - # file not being worked on, start working - if obj not in _processing_files: - _processing_files.add(obj) - break - # wait for the processing to end - time.sleep(0.1) - - try: - # retrieve slot from our #job semaphore and build - with _job_semaphore: - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - finally: - # register being done processing - with _global_lock: - _processing_files.remove(obj) - - - if isinstance(self, FCompiler): - objects_to_build = list(build.keys()) - f77_objects, other_objects = [], [] - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ - and not has_f90_header(src): - f77_objects.append((obj, (src, ext))) - else: - other_objects.append((obj, (src, ext))) - - # f77 objects can be built in parallel - build_items = f77_objects - # build f90 modules serial, module files are generated during - # compilation and may be used by files later in the list so the - # ordering is important - for o in other_objects: - single_compile(o) - else: - build_items = build.items() - - if len(build) > 1 and jobs > 1: - # build parallel - from concurrent.futures import ThreadPoolExecutor - with ThreadPoolExecutor(jobs) as pool: - res = pool.map(single_compile, build_items) - list(res) # access result to raise errors - else: - # build serial - for o in build_items: - single_compile(o) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ - Customize compiler using distutils command. - - Parameters - ---------- - cmd : class instance - An instance inheriting from ``distutils.cmd.Command``. - ignore : sequence of str, optional - List of ``distutils.ccompiler.CCompiler`` commands (without ``'set_'``) that should not be - altered. 
Strings that are checked for are: - ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', - 'rpath', 'link_objects')``. - - Returns - ------- - None - - """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - - if ( - hasattr(self, 'compiler') and - 'clang' in self.compiler[0] and - not (platform.machine() == 'arm64' and sys.platform == 'darwin') - ): - # clang defaults to a non-strict floating error point model. - # However, '-ftrapping-math' is not currently supported (2023-04-08) - # for macosx_arm64. - # Since NumPy and most Python libs give warnings for these, override: - self.compiler.append('-ftrapping-math') - self.compiler_so.append('-ftrapping-math') - - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name, value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = list(compiler.executables.keys()) - for key in ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch', - 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler, key): - v = getattr(compiler, key) - mx = max(mx, len(key)) - props.append((key, repr(v))) - fmt = '%-' + repr(mx+1) + 's = %s' - lines = [fmt % prop for prop in props] - return '\n'.join(lines) - -def CCompiler_show_customization(self): - """ - Print the compiler customizations to stdout. - - Parameters - ---------- - None - - Returns - ------- - None - - Notes - ----- - Printing is only done if the distutils log threshold is < 2. - - """ - try: - self.get_version() - except Exception: - pass - if log._global_log.threshold<2: - print('*'*80) - print(self.__class__) - print(_compiler_to_string(self)) - print('*'*80) - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - """ - Do any platform-specific customization of a compiler instance. - - This method calls ``distutils.sysconfig.customize_compiler`` for - platform-specific customization, as well as optionally remove a flag - to suppress spurious warnings in case C++ code is being compiled. - - Parameters - ---------- - dist : object - This parameter is not used for anything. - need_cxx : bool, optional - Whether or not C++ has to be compiled. If so (True), the - ``"-Wstrict-prototypes"`` option is removed to prevent spurious - warnings. Default is False. - - Returns - ------- - None - - Notes - ----- - All the default options used by distutils can be extracted with:: - - from distutils import sysconfig - sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - 'CCSHARED', 'LDSHARED', 'SO') - - """ - # See FCompiler.customize for suggested usage. 
- log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. - try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a, b)]\ - + self.compiler[1:] - else: - if hasattr(self, 'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - if not hasattr(self, 'compiler_cxx'): - log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) - - - # check if compiler supports gcc style automatic dependencies - # run on every extension so skip for known good compilers - if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or - 'g++' in self.compiler[0] or - 'clang' in self.compiler[0]): - self._auto_depends = True - elif os.name == 'posix': - import tempfile - import shutil - tmpdir = tempfile.mkdtemp() - try: - fn = os.path.join(tmpdir, "file.c") - with open(fn, "w") as f: - f.write("int a;\n") - self.compile([fn], output_dir=tmpdir, - extra_preargs=['-MMD', '-MF', fn + '.d']) - self._auto_depends = True - except CompileError: - self._auto_depends = False - finally: - shutil.rmtree(tmpdir) - - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler. - - Parameters - ---------- - pat : str, optional - A regular expression matching version numbers. - Default is ``r'[-.\\d]+'``. - ignore : str, optional - A regular expression matching patterns to skip. - Default is ``''``, in which case nothing is skipped. - start : str, optional - A regular expression matching the start of where to start looking - for version numbers. - Default is ``''``, in which case searching is started at the - beginning of the version string given to `matcher`. - - Returns - ------- - matcher : callable - A function that is appropriate to use as the ``.version_match`` - attribute of a ``distutils.ccompiler.CCompiler`` class. `matcher` takes a single parameter, - a version string. - - """ - def matcher(self, version_string): - # version string may appear in the second line, so getting rid - # of new lines: - version_string = version_string.replace('\n', ' ') - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while True: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """ - Return compiler version, or None if compiler is not available. - - Parameters - ---------- - force : bool, optional - If True, force a new determination of the version, even if the - compiler already has a version attribute. Default is False. - ok_status : list of int, optional - The list of status values returned by the version look-up process - for which a version string is returned. If the status value is not - in `ok_status`, None is returned. Default is ``[0]``. 
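The `simple_version_match` factory defined above can be exercised standalone; here is a minimal self-contained version of the same scan-and-skip logic (dropping the `self` parameter it takes when attached to a compiler class):

```python
import re

def simple_version_match(pat=r"[-.\d]+", ignore="", start=""):
    def matcher(version_string):
        version_string = version_string.replace("\n", " ")
        pos = 0
        if start:
            m = re.match(start, version_string)
            if not m:
                return None
            pos = m.end()
        while True:
            m = re.search(pat, version_string[pos:])
            if not m:
                return None
            if ignore and re.match(ignore, m.group(0)):
                pos += m.end()
                continue
            return m.group(0)
    return matcher

match = simple_version_match(start=r"gcc")
print(match("gcc (GCC) 11.4.0"))   # -> 11.4.0
```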
- - Returns - ------- - version : str or None - Version string, in the format of ``distutils.version.LooseVersion``. - - """ - if not force and hasattr(self, 'version'): - return self.version - self.find_executables() - try: - version_cmd = self.version_cmd - except AttributeError: - return None - if not version_cmd or not version_cmd[0]: - return None - try: - matcher = self.version_match - except AttributeError: - try: - pat = self.version_pattern - except AttributeError: - return None - def matcher(version_string): - m = re.match(pat, version_string) - if not m: - return None - version = m.group('version') - return version - - try: - output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as exc: - output = exc.output - status = exc.returncode - except OSError: - # match the historical returns for a parent - # exception class caught by exec_command() - status = 127 - output = b'' - else: - # output isn't actually a filepath but we do this - # for now to match previous distutils behavior - output = filepath_from_subprocess_output(output) - status = 0 - - version = None - if status in ok_status: - version = matcher(output) - if version: - version = LooseVersion(version) - self.version = version - return version - -replace_method(CCompiler, 'get_version', CCompiler_get_version) - -def CCompiler_cxx_compiler(self): - """ - Return the C++ compiler. - - Parameters - ---------- - None - - Returns - ------- - cxx : class instance - The C++ compiler, as a ``distutils.ccompiler.CCompiler`` instance. - - """ - if self.compiler_type in ('msvc', 'intelw', 'intelemw'): - return self - - cxx = copy(self) - cxx.compiler_cxx = cxx.compiler_cxx - cxx.compiler_so = [cxx.compiler_cxx[0]] + \ - sanitize_cxx_flags(cxx.compiler_so[1:]) - if (sys.platform.startswith(('aix', 'os400')) and - 'ld_so_aix' in cxx.linker_so[0]): - # AIX needs the ld_so_aix script included with Python - cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ - + cxx.linker_so[2:] - if sys.platform.startswith('os400'): - #This is required by i 7.4 and prievous for PRId64 in printf() call. - cxx.compiler_so.append('-D__STDC_FORMAT_MACROS') - #This a bug of gcc10.3, which failed to handle the TLS init. 
- cxx.compiler_so.append('-fno-extern-tls-init') - cxx.linker_so.append('-fno-extern-tls-init') - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', - "Intel C Compiler for 64-bit applications") -compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', - "Intel C Compiler for 32-bit applications on Windows") -compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', - "Intel C Compiler for 64-bit applications on Windows") -compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', - "PathScale Compiler for SiCortex-based applications") -compiler_class['arm'] = ('armccompiler', 'ArmCCompiler', - "Arm C Compiler") -compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler', - "Fujitsu C Compiler") - -ccompiler._default_compilers += (('linux.*', 'intel'), - ('linux.*', 'intele'), - ('linux.*', 'intelem'), - ('linux.*', 'pathcc'), - ('nt', 'intelw'), - ('nt', 'intelemw')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. - log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=None, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if verbose is None: - verbose = log.get_threshold() <= log.INFO - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - module_name = "numpy.distutils." 
+ module_name - try: - __import__ (module_name) - except ImportError as e: - msg = str(e) - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError as e: - msg = str(e) - raise DistutilsModuleError("can't compile C/C++ code: unable to load " - "module '%s'" % module_name) - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError(("can't compile C/C++ code: unable to find " - "class '%s' in module '%s'") % (class_name, module_name)) - compiler = klass(None, dry_run, force) - compiler.verbose = verbose - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - # the version of this function provided by CPython allows the following - # to return lists, which are unpacked automatically: - # - compiler.runtime_library_dir_option - # our version extends the behavior to: - # - compiler.library_dir_option - # - compiler.library_option - # - compiler.find_library_file - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.' + _cc + 'compiler') - if _m is not None: - setattr(_m, 'gen_lib_options', gen_lib_options) - diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py deleted file mode 100644 index 4dea2f9b1da1..000000000000 --- a/numpy/distutils/ccompiler_opt.py +++ /dev/null @@ -1,2668 +0,0 @@ -"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware -optimization, starting from parsing the command arguments, to managing the -relation between the CPU baseline and dispatch-able features, -also generating the required C headers and ending with compiling -the sources with proper compiler's flags. - -`CCompilerOpt` doesn't provide runtime detection for the CPU features, -instead only focuses on the compiler side, but it creates abstract C headers -that can be used later for the final runtime dispatching process.""" - -import atexit -import inspect -import os -import pprint -import re -import subprocess -import textwrap - -class _Config: - """An abstract class holds all configurable attributes of `CCompilerOpt`, - these class attributes can be used to change the default behavior - of `CCompilerOpt` in order to fit other requirements. - - Attributes - ---------- - conf_nocache : bool - Set True to disable memory and file cache. - Default is False. - - conf_noopt : bool - Set True to forces the optimization to be disabled, - in this case `CCompilerOpt` tends to generate all - expected headers in order to 'not' break the build. - Default is False. - - conf_cache_factors : list - Add extra factors to the primary caching factors. The caching factors - are utilized to determine if there are changes had happened that - requires to discard the cache and re-updating it. 
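As a concrete illustration of these factors, a cache entry can be fingerprinted and re-validated in a few lines; `cache_factors` below is a hypothetical stand-in for `conf_cache_factors`, not numpy.distutils API:

import os

# Hypothetical factors: the mtime of this very file plus the noopt switch,
# mirroring the default described above.
cache_factors = [os.path.getmtime(__file__), False]

def cache_valid(stored_factors):
    # Reuse the cache only if every factor is unchanged.
    return stored_factors == cache_factors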
The primary factors - are the arguments of `CCompilerOpt` and `CCompiler`'s properties(type, flags, etc). - Default is list of two items, containing the time of last modification - of `ccompiler_opt` and value of attribute "conf_noopt" - - conf_tmp_path : str, - The path of temporary directory. Default is auto-created - temporary directory via ``tempfile.mkdtemp()``. - - conf_check_path : str - The path of testing files. Each added CPU feature must have a - **C** source file contains at least one intrinsic or instruction that - related to this feature, so it can be tested against the compiler. - Default is ``./distutils/checks``. - - conf_target_groups : dict - Extra tokens that can be reached from dispatch-able sources through - the special mark ``@targets``. Default is an empty dictionary. - - **Notes**: - - case-insensitive for tokens and group names - - sign '#' must stick in the begin of group name and only within ``@targets`` - - **Example**: - .. code-block:: console - - $ "@targets #avx_group other_tokens" > group_inside.c - - >>> CCompilerOpt.conf_target_groups["avx_group"] = \\ - "$werror $maxopt avx2 avx512f avx512_skx" - >>> cco = CCompilerOpt(cc_instance) - >>> cco.try_dispatch(["group_inside.c"]) - - conf_c_prefix : str - The prefix of public C definitions. Default is ``"NPY_"``. - - conf_c_prefix_ : str - The prefix of internal C definitions. Default is ``"NPY__"``. - - conf_cc_flags : dict - Nested dictionaries defining several compiler flags - that linked to some major functions, the main key - represent the compiler name and sub-keys represent - flags names. Default is already covers all supported - **C** compilers. - - Sub-keys explained as follows: - - "native": str or None - used by argument option `native`, to detect the current - machine support via the compiler. - "werror": str or None - utilized to treat warning as errors during testing CPU features - against the compiler and also for target's policy `$werror` - via dispatch-able sources. - "maxopt": str or None - utilized for target's policy '$maxopt' and the value should - contains the maximum acceptable optimization by the compiler. - e.g. in gcc ``'-O3'`` - - **Notes**: - * case-sensitive for compiler names and flags - * use space to separate multiple flags - * any flag will tested against the compiler and it will skipped - if it's not applicable. - - conf_min_features : dict - A dictionary defines the used CPU features for - argument option ``'min'``, the key represent the CPU architecture - name e.g. ``'x86'``. Default values provide the best effort - on wide range of users platforms. - - **Note**: case-sensitive for architecture names. - - conf_features : dict - Nested dictionaries used for identifying the CPU features. - the primary key is represented as a feature name or group name - that gathers several features. Default values covers all - supported features but without the major options like "flags", - these undefined options handle it by method `conf_features_partial()`. - Default value is covers almost all CPU features for *X86*, *IBM/Power64* - and *ARM 7/8*. - - Sub-keys explained as follows: - - "implies" : str or list, optional, - List of CPU feature names to be implied by it, - the feature name must be defined within `conf_features`. - Default is None. - - "flags": str or list, optional - List of compiler flags. Default is None. - - "detect": str or list, optional - List of CPU feature names that required to be detected - in runtime. 
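The "detect" fallback chain is easiest to see on a miniature table; the entries below are trimmed, illustrative versions of the real ones in `conf_features`:

# Minimal sketch, assuming a trimmed feature table; the full table lives in
# _Config.conf_features.
features = {
    "AVX512_SKX": {
        "implies": "AVX512CD",
        "group": ["AVX512VL", "AVX512BW", "AVX512DQ"],
        "detect": ["AVX512_SKX"],
    },
    "SSE41": {"implies": "SSSE3"},
}

def runtime_detect_names(name):
    # Prefer "detect", then "group", then the feature name itself
    # (a simplified version of the lookup feature_detect() performs).
    d = features[name]
    return d.get("detect") or d.get("group") or [name]

assert runtime_detect_names("AVX512_SKX") == ["AVX512_SKX"]
assert runtime_detect_names("SSE41") == ["SSE41"]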
By default, its the feature name or features - in "group" if its specified. - - "implies_detect": bool, optional - If True, all "detect" of implied features will be combined. - Default is True. see `feature_detect()`. - - "group": str or list, optional - Same as "implies" but doesn't require the feature name to be - defined within `conf_features`. - - "interest": int, required - a key for sorting CPU features - - "headers": str or list, optional - intrinsics C header file - - "disable": str, optional - force disable feature, the string value should contains the - reason of disabling. - - "autovec": bool or None, optional - True or False to declare that CPU feature can be auto-vectorized - by the compiler. - By default(None), treated as True if the feature contains at - least one applicable flag. see `feature_can_autovec()` - - "extra_checks": str or list, optional - Extra test case names for the CPU feature that need to be tested - against the compiler. - - Each test case must have a C file named ``extra_xxxx.c``, where - ``xxxx`` is the case name in lower case, under 'conf_check_path'. - It should contain at least one intrinsic or function related to the test case. - - If the compiler able to successfully compile the C file then `CCompilerOpt` - will add a C ``#define`` for it into the main dispatch header, e.g. - ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case. - - **NOTES**: - * space can be used as separator with options that supports "str or list" - * case-sensitive for all values and feature name must be in upper-case. - * if flags aren't applicable, its will skipped rather than disable the - CPU feature - * the CPU feature will disabled if the compiler fail to compile - the test file - """ - conf_nocache = False - conf_noopt = False - conf_cache_factors = None - conf_tmp_path = None - conf_check_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "checks" - ) - conf_target_groups = {} - conf_c_prefix = 'NPY_' - conf_c_prefix_ = 'NPY__' - conf_cc_flags = dict( - gcc = dict( - # native should always fail on arm and ppc64, - # native usually works only with x86 - native = '-march=native', - opt = '-O3', - werror = '-Werror', - ), - clang = dict( - native = '-march=native', - opt = "-O3", - # One of the following flags needs to be applicable for Clang to - # guarantee the sanity of the testing process, however in certain - # cases `-Werror` gets skipped during the availability test due to - # "unused arguments" warnings. 
- # see https://github.com/numpy/numpy/issues/19624 - werror = '-Werror=switch -Werror', - ), - icc = dict( - native = '-xHost', - opt = '-O3', - werror = '-Werror', - ), - iccw = dict( - native = '/QxHost', - opt = '/O3', - werror = '/Werror', - ), - msvc = dict( - native = None, - opt = '/O2', - werror = '/WX', - ), - fcc = dict( - native = '-mcpu=a64fx', - opt = None, - werror = None, - ) - ) - conf_min_features = dict( - x86 = "SSE SSE2", - x64 = "SSE SSE2 SSE3", - ppc64 = '', # play it safe - ppc64le = "VSX VSX2", - s390x = '', - armhf = '', # play it safe - aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" - ) - conf_features = dict( - # X86 - SSE = dict( - interest=1, headers="xmmintrin.h", - # enabling SSE without SSE2 is useless also - # it's non-optional for x86_64 - implies="SSE2" - ), - SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"), - SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"), - SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"), - SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"), - POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"), - SSE42 = dict(interest=7, implies="POPCNT"), - AVX = dict( - interest=8, implies="SSE42", headers="immintrin.h", - implies_detect=False - ), - XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"), - FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"), - F16C = dict(interest=11, implies="AVX"), - FMA3 = dict(interest=12, implies="F16C"), - AVX2 = dict(interest=13, implies="F16C"), - AVX512F = dict( - interest=20, implies="FMA3 AVX2", implies_detect=False, - extra_checks="AVX512F_REDUCE" - ), - AVX512CD = dict(interest=21, implies="AVX512F"), - AVX512_KNL = dict( - interest=40, implies="AVX512CD", group="AVX512ER AVX512PF", - detect="AVX512_KNL", implies_detect=False - ), - AVX512_KNM = dict( - interest=41, implies="AVX512_KNL", - group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ", - detect="AVX512_KNM", implies_detect=False - ), - AVX512_SKX = dict( - interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", - detect="AVX512_SKX", implies_detect=False, - extra_checks="AVX512BW_MASK AVX512DQ_MASK" - ), - AVX512_CLX = dict( - interest=43, implies="AVX512_SKX", group="AVX512VNNI", - detect="AVX512_CLX" - ), - AVX512_CNL = dict( - interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI", - detect="AVX512_CNL", implies_detect=False - ), - AVX512_ICL = dict( - interest=45, implies="AVX512_CLX AVX512_CNL", - group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ", - detect="AVX512_ICL", implies_detect=False - ), - AVX512_SPR = dict( - interest=46, implies="AVX512_ICL", group="AVX512FP16", - detect="AVX512_SPR", implies_detect=False - ), - # IBM/Power - ## Power7/ISA 2.06 - VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), - ## Power8/ISA 2.07 - VSX2 = dict(interest=2, implies="VSX", implies_detect=False), - ## Power9/ISA 3.00 - VSX3 = dict(interest=3, implies="VSX2", implies_detect=False, - extra_checks="VSX3_HALF_DOUBLE"), - ## Power10/ISA 3.1 - VSX4 = dict(interest=4, implies="VSX3", implies_detect=False, - extra_checks="VSX4_MMA"), - # IBM/Z - ## VX(z13) support - VX = dict(interest=1, headers="vecintrin.h"), - ## Vector-Enhancements Facility - VXE = dict(interest=2, implies="VX", implies_detect=False), - ## Vector-Enhancements Facility 2 - VXE2 = dict(interest=3, implies="VXE", implies_detect=False), - # ARM - NEON = dict(interest=1, headers="arm_neon.h"), - NEON_FP16 = dict(interest=2, implies="NEON"), - ## FMA - NEON_VFPV4 = 
dict(interest=3, implies="NEON_FP16"), - ## Advanced SIMD - ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False), - ## ARMv8.2 half-precision & vector arithm - ASIMDHP = dict(interest=5, implies="ASIMD"), - ## ARMv8.2 dot product - ASIMDDP = dict(interest=6, implies="ASIMD"), - ## ARMv8.2 Single & half-precision Multiply - ASIMDFHM = dict(interest=7, implies="ASIMDHP") - ) - def conf_features_partial(self): - """Return a dictionary of supported CPU features by the platform, - and accumulate the rest of undefined options in `conf_features`, - the returned dict has same rules and notes in - class attribute `conf_features`, also its override - any options that been set in 'conf_features'. - """ - if self.cc_noopt: - # optimization is disabled - return {} - - on_x86 = self.cc_on_x86 or self.cc_on_x64 - is_unix = self.cc_is_gcc or self.cc_is_clang or self.cc_is_fcc - - if on_x86 and is_unix: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = dict(flags="-mpopcnt"), - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = dict(flags="-mf16c"), - XOP = dict(flags="-mxop"), - FMA4 = dict(flags="-mfma4"), - FMA3 = dict(flags="-mfma"), - AVX2 = dict(flags="-mavx2"), - AVX512F = dict(flags="-mavx512f -mno-mmx"), - AVX512CD = dict(flags="-mavx512cd"), - AVX512_KNL = dict(flags="-mavx512er -mavx512pf"), - AVX512_KNM = dict( - flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq" - ), - AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"), - AVX512_CLX = dict(flags="-mavx512vnni"), - AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"), - AVX512_ICL = dict( - flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq" - ), - AVX512_SPR = dict(flags="-mavx512fp16"), - ) - if on_x86 and self.cc_is_icc: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = {}, - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support AVX2 or FMA3 independently - FMA3 = dict( - implies="F16C AVX2", flags="-march=core-avx2" - ), - AVX2 = dict(implies="FMA3", flags="-march=core-avx2"), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="-march=common-avx512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="-march=common-avx512" - ), - AVX512_KNL = dict(flags="-xKNL"), - AVX512_KNM = dict(flags="-xKNM"), - AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"), - AVX512_CLX = dict(flags="-xCASCADELAKE"), - AVX512_CNL = dict(flags="-xCANNONLAKE"), - AVX512_ICL = dict(flags="-xICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_iccw: return dict( - SSE = dict(flags="/arch:SSE"), - SSE2 = dict(flags="/arch:SSE2"), - SSE3 = dict(flags="/arch:SSE3"), - SSSE3 = dict(flags="/arch:SSSE3"), - SSE41 = dict(flags="/arch:SSE4.1"), - POPCNT = {}, - SSE42 = dict(flags="/arch:SSE4.2"), - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", 
flags="/arch:CORE-AVX2" - ), - AVX2 = dict( - implies="FMA3", flags="/arch:CORE-AVX2" - ), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512" - ), - AVX512_KNL = dict(flags="/Qx:KNL"), - AVX512_KNM = dict(flags="/Qx:KNM"), - AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"), - AVX512_CLX = dict(flags="/Qx:CASCADELAKE"), - AVX512_CNL = dict(flags="/Qx:CANNONLAKE"), - AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_msvc: return dict( - SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {}, - SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {}, - SSE3 = {}, - SSSE3 = {}, - SSE41 = {}, - POPCNT = dict(headers="nmmintrin.h"), - SSE42 = {}, - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(headers="ammintrin.h"), - FMA4 = dict(headers="ammintrin.h"), - # MSVC doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", flags="/arch:AVX2" - ), - AVX2 = dict( - implies="F16C FMA3", flags="/arch:AVX2" - ), - # MSVC doesn't support AVX512F or AVX512CD independently, - # always generate instructions belong to (VL/VW/DQ) - AVX512F = dict( - implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512" - ), - AVX512CD = dict( - implies="AVX512F AVX512_SKX", flags="/arch:AVX512" - ), - AVX512_KNL = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_KNM = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_SKX = dict(flags="/arch:AVX512"), - AVX512_CLX = {}, - AVX512_CNL = {}, - AVX512_ICL = {}, - AVX512_SPR= dict( - disable="MSVC compiler doesn't support it" - ) - ) - - on_power = self.cc_on_ppc64le or self.cc_on_ppc64 - if on_power: - partial = dict( - VSX = dict( - implies=("VSX2" if self.cc_on_ppc64le else ""), - flags="-mvsx" - ), - VSX2 = dict( - flags="-mcpu=power8", implies_detect=False - ), - VSX3 = dict( - flags="-mcpu=power9 -mtune=power9", implies_detect=False - ), - VSX4 = dict( - flags="-mcpu=power10 -mtune=power10", implies_detect=False - ) - ) - if self.cc_is_clang: - partial["VSX"]["flags"] = "-maltivec -mvsx" - partial["VSX2"]["flags"] = "-mcpu=power8" - partial["VSX3"]["flags"] = "-mcpu=power9" - partial["VSX4"]["flags"] = "-mcpu=power10" - - return partial - - on_zarch = self.cc_on_s390x - if on_zarch: - partial = dict( - VX = dict( - flags="-march=arch11 -mzvector" - ), - VXE = dict( - flags="-march=arch12", implies_detect=False - ), - VXE2 = dict( - flags="-march=arch13", implies_detect=False - ) - ) - - return partial - - - if self.cc_on_aarch64 and is_unix: return dict( - NEON = dict( - implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True - ), - NEON_FP16 = dict( - implies="NEON NEON_VFPV4 ASIMD", autovec=True - ), - NEON_VFPV4 = dict( - implies="NEON NEON_FP16 ASIMD", autovec=True - ), - ASIMD = dict( - implies="NEON NEON_FP16 NEON_VFPV4", autovec=True - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - flags="-march=armv8.2-a+dotprod" - ), - ASIMDFHM = dict( - flags="-march=armv8.2-a+fp16fml" - ), - ) - if self.cc_on_armhf and is_unix: return dict( - NEON = dict( - flags="-mfpu=neon" - ), - NEON_FP16 = dict( - flags="-mfpu=neon-fp16 -mfp16-format=ieee" - ), - NEON_VFPV4 = dict( - flags="-mfpu=neon-vfpv4", - ), - ASIMD = dict( - flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd", - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - 
flags="-march=armv8.2-a+dotprod", - ), - ASIMDFHM = dict( - flags="-march=armv8.2-a+fp16fml" - ) - ) - # TODO: ARM MSVC - return {} - - def __init__(self): - if self.conf_tmp_path is None: - import shutil - import tempfile - tmp = tempfile.mkdtemp() - def rm_temp(): - try: - shutil.rmtree(tmp) - except OSError: - pass - atexit.register(rm_temp) - self.conf_tmp_path = tmp - - if self.conf_cache_factors is None: - self.conf_cache_factors = [ - os.path.getmtime(__file__), - self.conf_nocache - ] - -class _Distutils: - """A helper class that provides a collection of fundamental methods - implemented in a top of Python and NumPy Distutils. - - The idea behind this class is to gather all methods that it may - need to override in case of reuse 'CCompilerOpt' in environment - different than of what NumPy has. - - Parameters - ---------- - ccompiler : `CCompiler` - The generate instance that returned from `distutils.ccompiler.new_compiler()`. - """ - def __init__(self, ccompiler): - self._ccompiler = ccompiler - - def dist_compile(self, sources, flags, ccompiler=None, **kwargs): - """Wrap CCompiler.compile()""" - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - flags = kwargs.pop("extra_postargs", []) + flags - if not ccompiler: - ccompiler = self._ccompiler - - return ccompiler.compile(sources, extra_postargs=flags, **kwargs) - - def dist_test(self, source, flags, macros=[]): - """Return True if 'CCompiler.compile()' able to compile - a source file with certain flags. - """ - assert(isinstance(source, str)) - from distutils.errors import CompileError - cc = self._ccompiler; - bk_spawn = getattr(cc, 'spawn', None) - if bk_spawn: - cc_type = getattr(self._ccompiler, "compiler_type", "") - if cc_type in ("msvc",): - setattr(cc, 'spawn', self._dist_test_spawn_paths) - else: - setattr(cc, 'spawn', self._dist_test_spawn) - test = False - try: - self.dist_compile( - [source], flags, macros=macros, output_dir=self.conf_tmp_path - ) - test = True - except CompileError as e: - self.dist_log(str(e), stderr=True) - if bk_spawn: - setattr(cc, 'spawn', bk_spawn) - return test - - def dist_info(self): - """ - Return a tuple containing info about (platform, compiler, extra_args), - required by the abstract class '_CCompiler' for discovering the - platform environment. This is also used as a cache factor in order - to detect any changes happening from outside. 
- """ - if hasattr(self, "_dist_info"): - return self._dist_info - - cc_type = getattr(self._ccompiler, "compiler_type", '') - if cc_type in ("intelem", "intelemw"): - platform = "x86_64" - elif cc_type in ("intel", "intelw", "intele"): - platform = "x86" - else: - from distutils.util import get_platform - platform = get_platform() - - cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) - if not cc_type or cc_type == "unix": - if hasattr(cc_info, "__iter__"): - compiler = cc_info[0] - else: - compiler = str(cc_info) - else: - compiler = cc_type - - if hasattr(cc_info, "__iter__") and len(cc_info) > 1: - extra_args = ' '.join(cc_info[1:]) - else: - extra_args = os.environ.get("CFLAGS", "") - extra_args += os.environ.get("CPPFLAGS", "") - - self._dist_info = (platform, compiler, extra_args) - return self._dist_info - - @staticmethod - def dist_error(*args): - """Raise a compiler error""" - from distutils.errors import CompileError - raise CompileError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_fatal(*args): - """Raise a distutils error""" - from distutils.errors import DistutilsError - raise DistutilsError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_log(*args, stderr=False): - """Print a console message""" - from numpy.distutils import log - out = _Distutils._dist_str(*args) - if stderr: - log.warn(out) - else: - log.info(out) - - @staticmethod - def dist_load_module(name, path): - """Load a module from file, required by the abstract class '_Cache'.""" - from .misc_util import exec_mod_from_location - try: - return exec_mod_from_location(name, path) - except Exception as e: - _Distutils.dist_log(e, stderr=True) - return None - - @staticmethod - def _dist_str(*args): - """Return a string to print by log and errors.""" - def to_str(arg): - if not isinstance(arg, str) and hasattr(arg, '__iter__'): - ret = [] - for a in arg: - ret.append(to_str(a)) - return '('+ ' '.join(ret) + ')' - return str(arg) - - stack = inspect.stack()[2] - start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) - out = ' '.join([ - to_str(a) - for a in (*args,) - ]) - return start + out - - def _dist_test_spawn_paths(self, cmd, display=None): - """ - Fix msvc SDK ENV path same as distutils do - without it we get c1: fatal error C1356: unable to find mspdbcore.dll - """ - if not hasattr(self._ccompiler, "_paths"): - self._dist_test_spawn(cmd) - return - old_path = os.getenv("path") - try: - os.environ["path"] = self._ccompiler._paths - self._dist_test_spawn(cmd) - finally: - os.environ["path"] = old_path - - _dist_warn_regex = re.compile( - # intel and msvc compilers don't raise - # fatal errors when flags are wrong or unsupported - ".*(" - "warning D9002|" # msvc, it should be work with any language. 
- "invalid argument for option" # intel - ").*" - ) - @staticmethod - def _dist_test_spawn(cmd, display=None): - try: - o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, - text=True) - if o and re.match(_Distutils._dist_warn_regex, o): - _Distutils.dist_error( - "Flags in command", cmd ,"aren't supported by the compiler" - ", output -> \n%s" % o - ) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - o = e - s = 127 - else: - return None - _Distutils.dist_error( - "Command", cmd, "failed with exit status %d output -> \n%s" % ( - s, o - )) - -_share_cache = {} -class _Cache: - """An abstract class handles caching functionality, provides two - levels of caching, in-memory by share instances attributes among - each other and by store attributes into files. - - **Note**: - any attributes that start with ``_`` or ``conf_`` will be ignored. - - Parameters - ---------- - cache_path : str or None - The path of cache file, if None then cache in file will disabled. - - *factors : - The caching factors that need to utilize next to `conf_cache_factors`. - - Attributes - ---------- - cache_private : set - Hold the attributes that need be skipped from "in-memory cache". - - cache_infile : bool - Utilized during initializing this class, to determine if the cache was able - to loaded from the specified cache path in 'cache_path'. - """ - - # skip attributes from cache - _cache_ignore = re.compile("^(_|conf_)") - - def __init__(self, cache_path=None, *factors): - self.cache_me = {} - self.cache_private = set() - self.cache_infile = False - self._cache_path = None - - if self.conf_nocache: - self.dist_log("cache is disabled by `Config`") - return - - self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) - self._cache_path = cache_path - if cache_path: - if os.path.exists(cache_path): - self.dist_log("load cache from file ->", cache_path) - cache_mod = self.dist_load_module("cache", cache_path) - if not cache_mod: - self.dist_log( - "unable to load the cache file as a module", - stderr=True - ) - elif not hasattr(cache_mod, "hash") or \ - not hasattr(cache_mod, "data"): - self.dist_log("invalid cache file", stderr=True) - elif self._cache_hash == cache_mod.hash: - self.dist_log("hit the file cache") - for attr, val in cache_mod.data.items(): - setattr(self, attr, val) - self.cache_infile = True - else: - self.dist_log("miss the file cache") - - if not self.cache_infile: - other_cache = _share_cache.get(self._cache_hash) - if other_cache: - self.dist_log("hit the memory cache") - for attr, val in other_cache.__dict__.items(): - if attr in other_cache.cache_private or \ - re.match(self._cache_ignore, attr): - continue - setattr(self, attr, val) - - _share_cache[self._cache_hash] = self - atexit.register(self.cache_flush) - - def __del__(self): - for h, o in _share_cache.items(): - if o == self: - _share_cache.pop(h) - break - - def cache_flush(self): - """ - Force update the cache. 
- """ - if not self._cache_path: - return - # TODO: don't write if the cache doesn't change - self.dist_log("write cache to path ->", self._cache_path) - cdict = self.__dict__.copy() - for attr in self.__dict__.keys(): - if re.match(self._cache_ignore, attr): - cdict.pop(attr) - - d = os.path.dirname(self._cache_path) - if not os.path.exists(d): - os.makedirs(d) - - repr_dict = pprint.pformat(cdict, compact=True) - with open(self._cache_path, "w") as f: - f.write(textwrap.dedent("""\ - # AUTOGENERATED DON'T EDIT - # Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - hash = {} - data = \\ - """).format(self._cache_hash)) - f.write(repr_dict) - - def cache_hash(self, *factors): - # is there a built-in non-crypto hash? - # sdbm - chash = 0 - for f in factors: - for char in str(f): - chash = ord(char) + (chash << 6) + (chash << 16) - chash - chash &= 0xFFFFFFFF - return chash - - @staticmethod - def me(cb): - """ - A static method that can be treated as a decorator to - dynamically cache certain methods. - """ - def cache_wrap_me(self, *args, **kwargs): - # good for normal args - cache_key = str(( - cb.__name__, *args, *kwargs.keys(), *kwargs.values() - )) - if cache_key in self.cache_me: - return self.cache_me[cache_key] - ccb = cb(self, *args, **kwargs) - self.cache_me[cache_key] = ccb - return ccb - return cache_wrap_me - -class _CCompiler: - """A helper class for `CCompilerOpt` containing all utilities that - related to the fundamental compiler's functions. - - Attributes - ---------- - cc_on_x86 : bool - True when the target architecture is 32-bit x86 - cc_on_x64 : bool - True when the target architecture is 64-bit x86 - cc_on_ppc64 : bool - True when the target architecture is 64-bit big-endian powerpc - cc_on_ppc64le : bool - True when the target architecture is 64-bit litle-endian powerpc - cc_on_s390x : bool - True when the target architecture is IBM/ZARCH on linux - cc_on_armhf : bool - True when the target architecture is 32-bit ARMv7+ - cc_on_aarch64 : bool - True when the target architecture is 64-bit Armv8-a+ - cc_on_noarch : bool - True when the target architecture is unknown or not supported - cc_is_gcc : bool - True if the compiler is GNU or - if the compiler is unknown - cc_is_clang : bool - True if the compiler is Clang - cc_is_icc : bool - True if the compiler is Intel compiler (unix like) - cc_is_iccw : bool - True if the compiler is Intel compiler (msvc like) - cc_is_nocc : bool - True if the compiler isn't supported directly, - Note: that cause a fail-back to gcc - cc_has_debug : bool - True if the compiler has debug flags - cc_has_native : bool - True if the compiler has native flags - cc_noopt : bool - True if the compiler has definition 'DISABLE_OPT*', - or 'cc_on_noarch' is True - cc_march : str - The target architecture name, or "unknown" if - the architecture isn't supported - cc_name : str - The compiler name, or "unknown" if the compiler isn't supported - cc_flags : dict - Dictionary containing the initialized flags of `_Config.conf_cc_flags` - """ - def __init__(self): - if hasattr(self, "cc_is_cached"): - return - # attr regex compiler-expression - detect_arch = ( - ("cc_on_x64", ".*(x|x86_|amd)64.*", ""), - ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""), - ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__LITTLE_ENDIAN__)"), - ("cc_on_ppc64", ".*(powerpc|ppc).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__BIG_ENDIAN__)"), - ("cc_on_aarch64", ".*(aarch64|arm64).*", ""), 
- ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || " - "defined(__ARM_ARCH_7A__)"), - ("cc_on_s390x", ".*s390x.*", ""), - # undefined platform - ("cc_on_noarch", "", ""), - ) - detect_compiler = ( - ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""), - ("cc_is_clang", ".*clang.*", ""), - # intel msvc like - ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""), - ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like - ("cc_is_msvc", ".*msvc.*", ""), - ("cc_is_fcc", ".*fcc.*", ""), - # undefined compiler will be treat it as gcc - ("cc_is_nocc", "", ""), - ) - detect_args = ( - ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""), - ("cc_has_native", - ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""), - # in case if the class run with -DNPY_DISABLE_OPTIMIZATION - ("cc_noopt", ".*DISABLE_OPT.*", ""), - ) - - dist_info = self.dist_info() - platform, compiler_info, extra_args = dist_info - # set False to all attrs - for section in (detect_arch, detect_compiler, detect_args): - for attr, rgex, cexpr in section: - setattr(self, attr, False) - - for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): - for attr, rgex, cexpr in detect: - if rgex and not re.match(rgex, searchin, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - break - - for attr, rgex, cexpr in detect_args: - if rgex and not re.match(rgex, extra_args, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - - if self.cc_on_noarch: - self.dist_log( - "unable to detect CPU architecture which lead to disable the optimization. " - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_noopt = True - - if self.conf_noopt: - self.dist_log("Optimization is disabled by the Config", stderr=True) - self.cc_noopt = True - - if self.cc_is_nocc: - """ - mingw can be treated as a gcc, and also xlc even if it based on clang, - but still has the same gcc optimization flags. - """ - self.dist_log( - "unable to detect compiler type which leads to treating it as GCC. " - "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_is_gcc = True - - self.cc_march = "unknown" - for arch in ("x86", "x64", "ppc64", "ppc64le", - "armhf", "aarch64", "s390x"): - if getattr(self, "cc_on_" + arch): - self.cc_march = arch - break - - self.cc_name = "unknown" - for name in ("gcc", "clang", "iccw", "icc", "msvc", "fcc"): - if getattr(self, "cc_is_" + name): - self.cc_name = name - break - - self.cc_flags = {} - compiler_flags = self.conf_cc_flags.get(self.cc_name) - if compiler_flags is None: - self.dist_fatal( - "undefined flag for compiler '%s', " - "leave an empty dict instead" % self.cc_name - ) - for name, flags in compiler_flags.items(): - self.cc_flags[name] = nflags = [] - if flags: - assert(isinstance(flags, str)) - flags = flags.split() - for f in flags: - if self.cc_test_flags([f]): - nflags.append(f) - - self.cc_is_cached = True - - @_Cache.me - def cc_test_flags(self, flags): - """ - Returns True if the compiler supports 'flags'. 
- """ - assert(isinstance(flags, list)) - self.dist_log("testing flags", flags) - test_path = os.path.join(self.conf_check_path, "test_flags.c") - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def cc_test_cexpr(self, cexpr, flags=[]): - """ - Same as the above but supports compile-time expressions. - """ - self.dist_log("testing compiler expression", cexpr) - test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c") - with open(test_path, "w") as fd: - fd.write(textwrap.dedent(f"""\ - #if !({cexpr}) - #error "unsupported expression" - #endif - int dummy; - """)) - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - def cc_normalize_flags(self, flags): - """ - Remove the conflicts that caused due gathering implied features flags. - - Parameters - ---------- - 'flags' list, compiler flags - flags should be sorted from the lowest to the highest interest. - - Returns - ------- - list, filtered from any conflicts. - - Examples - -------- - >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) - ['armv8.2-a+fp16+dotprod'] - - >>> self.cc_normalize_flags( - ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] - ) - ['-march=core-avx2'] - """ - assert(isinstance(flags, list)) - if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: - return self._cc_normalize_unix(flags) - - if self.cc_is_msvc or self.cc_is_iccw: - return self._cc_normalize_win(flags) - return flags - - _cc_normalize_unix_mrgx = re.compile( - # 1- to check the highest of - r"^(-mcpu=|-march=|-x[A-Z0-9\-])" - ) - _cc_normalize_unix_frgx = re.compile( - # 2- to remove any flags starts with - # -march, -mcpu, -x(INTEL) and '-m' without '=' - r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|" - # exclude: - r"(?:-mzvector)" - ) - _cc_normalize_unix_krgx = re.compile( - # 3- keep only the highest of - r"^(-mfpu|-mtune)" - ) - _cc_normalize_arch_ver = re.compile( - r"[0-9.]" - ) - def _cc_normalize_unix(self, flags): - def ver_flags(f): - # arch ver subflag - # -march=armv8.2-a+fp16fml - tokens = f.split('+') - ver = float('0' + ''.join( - re.findall(self._cc_normalize_arch_ver, tokens[0]) - )) - return ver, tokens[0], tokens[1:] - - if len(flags) <= 1: - return flags - # get the highest matched flag - for i, cur_flag in enumerate(reversed(flags)): - if not re.match(self._cc_normalize_unix_mrgx, cur_flag): - continue - lower_flags = flags[:-(i+1)] - upper_flags = flags[-i:] - filtered = list(filter( - self._cc_normalize_unix_frgx.search, lower_flags - )) - # gather subflags - ver, arch, subflags = ver_flags(cur_flag) - if ver > 0 and len(subflags) > 0: - for xflag in lower_flags: - xver, _, xsubflags = ver_flags(xflag) - if ver == xver: - subflags = xsubflags + subflags - cur_flag = arch + '+' + '+'.join(subflags) - - flags = filtered + [cur_flag] - if i > 0: - flags += upper_flags - break - - # to remove overridable flags - final_flags = [] - matched = set() - for f in reversed(flags): - match = re.match(self._cc_normalize_unix_krgx, f) - if not match: - pass - elif match[0] in matched: - continue - else: - matched.add(match[0]) - final_flags.insert(0, f) - return final_flags - - _cc_normalize_win_frgx = re.compile( - r"^(?!(/arch\:|/Qx\:))" - ) - _cc_normalize_win_mrgx = re.compile( - r"^(/arch|/Qx:)" - ) - def _cc_normalize_win(self, flags): - for i, f in enumerate(reversed(flags)): - if not 
re.match(self._cc_normalize_win_mrgx, f): - continue - i += 1 - return list(filter( - self._cc_normalize_win_frgx.search, flags[:-i] - )) + flags[-i:] - return flags - -class _Feature: - """A helper class for `CCompilerOpt` that managing CPU features. - - Attributes - ---------- - feature_supported : dict - Dictionary containing all CPU features that supported - by the platform, according to the specified values in attribute - `_Config.conf_features` and `_Config.conf_features_partial()` - - feature_min : set - The minimum support of CPU features, according to - the specified values in attribute `_Config.conf_min_features`. - """ - def __init__(self): - if hasattr(self, "feature_is_cached"): - return - self.feature_supported = pfeatures = self.conf_features_partial() - for feature_name in list(pfeatures.keys()): - feature = pfeatures[feature_name] - cfeature = self.conf_features[feature_name] - feature.update({ - k:v for k,v in cfeature.items() if k not in feature - }) - disabled = feature.get("disable") - if disabled is not None: - pfeatures.pop(feature_name) - self.dist_log( - "feature '%s' is disabled," % feature_name, - disabled, stderr=True - ) - continue - # list is used internally for these options - for option in ( - "implies", "group", "detect", "headers", "flags", "extra_checks" - ) : - oval = feature.get(option) - if isinstance(oval, str): - feature[option] = oval.split() - - self.feature_min = set() - min_f = self.conf_min_features.get(self.cc_march, "") - for F in min_f.upper().split(): - if F in self.feature_supported: - self.feature_min.add(F) - - self.feature_is_cached = True - - def feature_names(self, names=None, force_flags=None, macros=[]): - """ - Returns a set of CPU feature names that supported by platform and the **C** compiler. - - Parameters - ---------- - names : sequence or None, optional - Specify certain CPU features to test it against the **C** compiler. - if None(default), it will test all current supported features. - **Note**: feature names must be in upper-case. - - force_flags : list or None, optional - If None(default), default compiler flags for every CPU feature will - be used during the test. - - macros : list of tuples, optional - A list of C macro definitions. - """ - assert( - names is None or ( - not isinstance(names, str) and - hasattr(names, "__iter__") - ) - ) - assert(force_flags is None or isinstance(force_flags, list)) - if names is None: - names = self.feature_supported.keys() - supported_names = set() - for f in names: - if self.feature_is_supported( - f, force_flags=force_flags, macros=macros - ): - supported_names.add(f) - return supported_names - - def feature_is_exist(self, name): - """ - Returns True if a certain feature is exist and covered within - ``_Config.conf_features``. - - Parameters - ---------- - 'name': str - feature name in uppercase. - """ - assert(name.isupper()) - return name in self.conf_features - - def feature_sorted(self, names, reverse=False): - """ - Sort a list of CPU features ordered by the lowest interest. - - Parameters - ---------- - 'names': sequence - sequence of supported feature names in uppercase. - 'reverse': bool, optional - If true, the sorted features is reversed. 
(highest interest) - - Returns - ------- - list, sorted CPU features - """ - def sort_cb(k): - if isinstance(k, str): - return self.feature_supported[k]["interest"] - # multiple features - rank = max([self.feature_supported[f]["interest"] for f in k]) - # FIXME: that's not a safe way to increase the rank for - # multi targets - rank += len(k) -1 - return rank - return sorted(names, reverse=reverse, key=sort_cb) - - def feature_implies(self, names, keep_origins=False): - """ - Return a set of CPU features that implied by 'names' - - Parameters - ---------- - names : str or sequence of str - CPU feature name(s) in uppercase. - - keep_origins : bool - if False(default) then the returned set will not contain any - features from 'names'. This case happens only when two features - imply each other. - - Examples - -------- - >>> self.feature_implies("SSE3") - {'SSE', 'SSE2'} - >>> self.feature_implies("SSE2") - {'SSE'} - >>> self.feature_implies("SSE2", keep_origins=True) - # 'SSE2' found here since 'SSE' and 'SSE2' imply each other - {'SSE', 'SSE2'} - """ - def get_implies(name, _caller=set()): - implies = set() - d = self.feature_supported[name] - for i in d.get("implies", []): - implies.add(i) - if i in _caller: - # infinity recursive guard since - # features can imply each other - continue - _caller.add(name) - implies = implies.union(get_implies(i, _caller)) - return implies - - if isinstance(names, str): - implies = get_implies(names) - names = [names] - else: - assert(hasattr(names, "__iter__")) - implies = set() - for n in names: - implies = implies.union(get_implies(n)) - if not keep_origins: - implies.difference_update(names) - return implies - - def feature_implies_c(self, names): - """same as feature_implies() but combining 'names'""" - if isinstance(names, str): - names = set((names,)) - else: - names = set(names) - return names.union(self.feature_implies(names)) - - def feature_ahead(self, names): - """ - Return list of features in 'names' after remove any - implied features and keep the origins. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. - - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"]) - ["SSE41"] - # assume AVX2 and FMA3 implies each other and AVX2 - # is the highest interest - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2"] - # assume AVX2 and FMA3 don't implies each other - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2", "FMA3"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - implies = self.feature_implies(names, keep_origins=True) - ahead = [n for n in names if n not in implies] - if len(ahead) == 0: - # return the highest interested feature - # if all features imply each other - ahead = self.feature_sorted(names, reverse=True)[:1] - return ahead - - def feature_untied(self, names): - """ - same as 'feature_ahead()' but if both features implied each other - and keep the highest interest. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. 
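The guarded recursion behind `feature_implies` can be reproduced on a toy table in which SSE and SSE2 imply each other, as they do in the real table:

# Toy "implies" table; the real one is assembled from conf_features.
implies = {"SSE": ["SSE2"], "SSE2": ["SSE"], "SSE3": ["SSE2"]}

def closure(name, seen=None):
    # `seen` guards against the infinite recursion that mutual
    # implication would otherwise cause.
    seen = set() if seen is None else seen
    out = set()
    for i in implies.get(name, []):
        out.add(i)
        if i not in seen:
            seen.add(name)
            out |= closure(i, seen)
    return out

# With the origin removed, this matches the docstring examples above.
assert closure("SSE3") - {"SSE3"} == {"SSE", "SSE2"}
assert closure("SSE2") - {"SSE2"} == {"SSE"}  # mutual implication terminates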
- - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_untied(["SSE2", "SSE3", "SSE41"]) - ["SSE2", "SSE3", "SSE41"] - # assume AVX2 and FMA3 implies each other - >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"]) - ["SSE2", "SSE3", "SSE41", "AVX2"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - final = [] - for n in names: - implies = self.feature_implies(n) - tied = [ - nn for nn in final - if nn in implies and n in self.feature_implies(nn) - ] - if tied: - tied = self.feature_sorted(tied + [n]) - if n not in tied[1:]: - continue - final.remove(tied[:1][0]) - final.append(n) - return final - - def feature_get_til(self, names, keyisfalse): - """ - same as `feature_implies_c()` but stop collecting implied - features when feature's option that provided through - parameter 'keyisfalse' is False, also sorting the returned - features. - """ - def til(tnames): - # sort from highest to lowest interest then cut if "key" is False - tnames = self.feature_implies_c(tnames) - tnames = self.feature_sorted(tnames, reverse=True) - for i, n in enumerate(tnames): - if not self.feature_supported[n].get(keyisfalse, True): - tnames = tnames[:i+1] - break - return tnames - - if isinstance(names, str) or len(names) <= 1: - names = til(names) - # normalize the sort - names.reverse() - return names - - names = self.feature_ahead(names) - names = {t for n in names for t in til(n)} - return self.feature_sorted(names) - - def feature_detect(self, names): - """ - Return a list of CPU features that required to be detected - sorted from the lowest to highest interest. - """ - names = self.feature_get_til(names, "implies_detect") - detect = [] - for n in names: - d = self.feature_supported[n] - detect += d.get("detect", d.get("group", [n])) - return detect - - @_Cache.me - def feature_flags(self, names): - """ - Return a list of CPU features flags sorted from the lowest - to highest interest. - """ - names = self.feature_sorted(self.feature_implies_c(names)) - flags = [] - for n in names: - d = self.feature_supported[n] - f = d.get("flags", []) - if not f or not self.cc_test_flags(f): - continue - flags += f - return self.cc_normalize_flags(flags) - - @_Cache.me - def feature_test(self, name, force_flags=None, macros=[]): - """ - Test a certain CPU feature against the compiler through its own - check file. - - Parameters - ---------- - name : str - Supported CPU feature name. - - force_flags : list or None, optional - If None(default), the returned flags from `feature_flags()` - will be used. - - macros : list of tuples, optional - A list of C macro definitions. - """ - if force_flags is None: - force_flags = self.feature_flags(name) - - self.dist_log( - "testing feature '%s' with flags (%s)" % ( - name, ' '.join(force_flags) - )) - # Each CPU feature must have C source code contains at - # least one intrinsic or instruction related to this feature. - test_path = os.path.join( - self.conf_check_path, "cpu_%s.c" % name.lower() - ) - if not os.path.exists(test_path): - self.dist_fatal("feature test file is not exist", test_path) - - test = self.dist_test( - test_path, force_flags + self.cc_flags["werror"], macros=macros - ) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def feature_is_supported(self, name, force_flags=None, macros=[]): - """ - Check if a certain CPU feature is supported by the platform and compiler. 
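On a toy table, the interest-ordered flag gathering performed by `feature_flags` reduces to a sort and a concatenation (interest values mirror the real table; the flags are GCC-style):

table = {
    "SSE2": {"interest": 2, "flags": ["-msse2"]},
    "FMA3": {"interest": 12, "flags": ["-mfma"]},
    "AVX2": {"interest": 13, "flags": ["-mavx2"]},
}

def gather_flags(names):
    # Walk from lowest to highest interest, concatenating each feature's
    # flags; the real method then filters each flag through cc_test_flags()
    # and normalizes conflicts away.
    ordered = sorted(names, key=lambda n: table[n]["interest"])
    flags = []
    for n in ordered:
        flags += table[n]["flags"]
    return flags

assert gather_flags({"AVX2", "SSE2", "FMA3"}) == ["-msse2", "-mfma", "-mavx2"]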
- - Parameters - ---------- - name : str - CPU feature name in uppercase. - - force_flags : list or None, optional - If None(default), default compiler flags for every CPU feature will - be used during test. - - macros : list of tuples, optional - A list of C macro definitions. - """ - assert(name.isupper()) - assert(force_flags is None or isinstance(force_flags, list)) - - supported = name in self.feature_supported - if supported: - for impl in self.feature_implies(name): - if not self.feature_test(impl, force_flags, macros=macros): - return False - if not self.feature_test(name, force_flags, macros=macros): - return False - return supported - - @_Cache.me - def feature_can_autovec(self, name): - """ - check if the feature can be auto-vectorized by the compiler - """ - assert(isinstance(name, str)) - d = self.feature_supported[name] - can = d.get("autovec", None) - if can is None: - valid_flags = [ - self.cc_test_flags([f]) for f in d.get("flags", []) - ] - can = valid_flags and any(valid_flags) - return can - - @_Cache.me - def feature_extra_checks(self, name): - """ - Return a list of supported extra checks after testing them against - the compiler. - - Parameters - ---------- - names : str - CPU feature name in uppercase. - """ - assert isinstance(name, str) - d = self.feature_supported[name] - extra_checks = d.get("extra_checks", []) - if not extra_checks: - return [] - - self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks) - flags = self.feature_flags(name) - available = [] - not_available = [] - for chk in extra_checks: - test_path = os.path.join( - self.conf_check_path, "extra_%s.c" % chk.lower() - ) - if not os.path.exists(test_path): - self.dist_fatal("extra check file does not exist", test_path) - - is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"]) - if is_supported: - available.append(chk) - else: - not_available.append(chk) - - if not_available: - self.dist_log("testing failed for checks", not_available, stderr=True) - return available - - - def feature_c_preprocessor(self, feature_name, tabs=0): - """ - Generate C preprocessor definitions and include headers of a CPU feature. - - Parameters - ---------- - 'feature_name': str - CPU feature name in uppercase. - 'tabs': int - if > 0, align the generated strings to the right depend on number of tabs. - - Returns - ------- - str, generated C preprocessor - - Examples - -------- - >>> self.feature_c_preprocessor("SSE3") - /** SSE3 **/ - #define NPY_HAVE_SSE3 1 - #include - """ - assert(feature_name.isupper()) - feature = self.feature_supported.get(feature_name) - assert(feature is not None) - - prepr = [ - "/** %s **/" % feature_name, - "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name) - ] - prepr += [ - "#include <%s>" % h for h in feature.get("headers", []) - ] - - extra_defs = feature.get("group", []) - extra_defs += self.feature_extra_checks(feature_name) - for edef in extra_defs: - # Guard extra definitions in case of duplicate with - # another feature - prepr += [ - "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef), - "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef), - "#endif", - ] - - if tabs > 0: - prepr = [('\t'*tabs) + l for l in prepr] - return '\n'.join(prepr) - -class _Parse: - """A helper class that parsing main arguments of `CCompilerOpt`, - also parsing configuration statements in dispatch-able sources. - - Parameters - ---------- - cpu_baseline : str or None - minimal set of required CPU features or special options. 
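The accepted argument syntax can be checked in isolation: comma or space separate tokens, while `+` and `-` switch between adding and removing features. The split pattern below is the same one `_parse_regex_arg` compiles further down:

import re

# '+' and '-' are captured so they survive the split as their own tokens.
_arg_regex = re.compile(r'\s|,|([+-])')
tokens = list(filter(None, _arg_regex.split("min,avx2 -avx512f")))
assert tokens == ["min", "avx2", "-", "avx512f"]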
- - cpu_dispatch : str or None - dispatched set of additional CPU features or special options. - - Special options can be: - - **MIN**: Enables the minimum CPU features that utilized via `_Config.conf_min_features` - - **MAX**: Enables all supported CPU features by the Compiler and platform. - - **NATIVE**: Enables all CPU features that supported by the current machine. - - **NONE**: Enables nothing - - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**. - NOTE: operand + is only added for nominal reason. - - NOTES: - - Case-insensitive among all CPU features and special options. - - Comma or space can be used as a separator. - - If the CPU feature is not supported by the user platform or compiler, - it will be skipped rather than raising a fatal error. - - Any specified CPU features to 'cpu_dispatch' will be skipped if its part of CPU baseline features - - 'cpu_baseline' force enables implied features. - - Attributes - ---------- - parse_baseline_names : list - Final CPU baseline's feature names(sorted from low to high) - parse_baseline_flags : list - Compiler flags of baseline features - parse_dispatch_names : list - Final CPU dispatch-able feature names(sorted from low to high) - parse_target_groups : dict - Dictionary containing initialized target groups that configured - through class attribute `conf_target_groups`. - - The key is represent the group name and value is a tuple - contains three items : - - bool, True if group has the 'baseline' option. - - list, list of CPU features. - - list, list of extra compiler flags. - - """ - def __init__(self, cpu_baseline, cpu_dispatch): - self._parse_policies = dict( - # POLICY NAME, (HAVE, NOT HAVE, [DEB]) - KEEP_BASELINE = ( - None, self._parse_policy_not_keepbase, - [] - ), - KEEP_SORT = ( - self._parse_policy_keepsort, - self._parse_policy_not_keepsort, - [] - ), - MAXOPT = ( - self._parse_policy_maxopt, None, - [] - ), - WERROR = ( - self._parse_policy_werror, None, - [] - ), - AUTOVEC = ( - self._parse_policy_autovec, None, - ["MAXOPT"] - ) - ) - if hasattr(self, "parse_is_cached"): - return - - self.parse_baseline_names = [] - self.parse_baseline_flags = [] - self.parse_dispatch_names = [] - self.parse_target_groups = {} - - if self.cc_noopt: - # skip parsing baseline and dispatch args and keep parsing target groups - cpu_baseline = cpu_dispatch = None - - self.dist_log("check requested baseline") - if cpu_baseline is not None: - cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline) - baseline_names = self.feature_names(cpu_baseline) - self.parse_baseline_flags = self.feature_flags(baseline_names) - self.parse_baseline_names = self.feature_sorted( - self.feature_implies_c(baseline_names) - ) - - self.dist_log("check requested dispatch-able features") - if cpu_dispatch is not None: - cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch) - cpu_dispatch = { - f for f in cpu_dispatch_ - if f not in self.parse_baseline_names - } - conflict_baseline = cpu_dispatch_.difference(cpu_dispatch) - self.parse_dispatch_names = self.feature_sorted( - self.feature_names(cpu_dispatch) - ) - if len(conflict_baseline) > 0: - self.dist_log( - "skip features", conflict_baseline, "since its part of baseline" - ) - - self.dist_log("initialize targets groups") - for group_name, tokens in self.conf_target_groups.items(): - self.dist_log("parse target group", group_name) - GROUP_NAME = group_name.upper() - if not tokens or not tokens.strip(): - # allow empty groups, useful in case if 
there's a need - # to disable certain group since '_parse_target_tokens()' - # requires at least one valid target - self.parse_target_groups[GROUP_NAME] = ( - False, [], [] - ) - continue - has_baseline, features, extra_flags = \ - self._parse_target_tokens(tokens) - self.parse_target_groups[GROUP_NAME] = ( - has_baseline, features, extra_flags - ) - - self.parse_is_cached = True - - def parse_targets(self, source): - """ - Fetch and parse configuration statements that required for - defining the targeted CPU features, statements should be declared - in the top of source in between **C** comment and start - with a special mark **@targets**. - - Configuration statements are sort of keywords representing - CPU features names, group of statements and policies, combined - together to determine the required optimization. - - Parameters - ---------- - source : str - the path of **C** source file. - - Returns - ------- - - bool, True if group has the 'baseline' option - - list, list of CPU features - - list, list of extra compiler flags - """ - self.dist_log("looking for '@targets' inside -> ", source) - # get lines between /*@targets and */ - with open(source) as fd: - tokens = "" - max_to_reach = 1000 # good enough, isn't? - start_with = "@targets" - start_pos = -1 - end_with = "*/" - end_pos = -1 - for current_line, line in enumerate(fd): - if current_line == max_to_reach: - self.dist_fatal("reached the max of lines") - break - if start_pos == -1: - start_pos = line.find(start_with) - if start_pos == -1: - continue - start_pos += len(start_with) - tokens += line - end_pos = line.find(end_with) - if end_pos != -1: - end_pos += len(tokens) - len(line) - break - - if start_pos == -1: - self.dist_fatal("expected to find '%s' within a C comment" % start_with) - if end_pos == -1: - self.dist_fatal("expected to end with '%s'" % end_with) - - tokens = tokens[start_pos:end_pos] - return self._parse_target_tokens(tokens) - - _parse_regex_arg = re.compile(r'\s|,|([+-])') - def _parse_arg_features(self, arg_name, req_features): - if not isinstance(req_features, str): - self.dist_fatal("expected a string in '%s'" % arg_name) - - final_features = set() - # space and comma can be used as a separator - tokens = list(filter(None, re.split(self._parse_regex_arg, req_features))) - append = True # append is the default - for tok in tokens: - if tok[0] in ("#", "$"): - self.dist_fatal( - arg_name, "target groups and policies " - "aren't allowed from arguments, " - "only from dispatch-able sources" - ) - if tok == '+': - append = True - continue - if tok == '-': - append = False - continue - - TOK = tok.upper() # we use upper-case internally - features_to = set() - if TOK == "NONE": - pass - elif TOK == "NATIVE": - native = self.cc_flags["native"] - if not native: - self.dist_fatal(arg_name, - "native option isn't supported by the compiler" - ) - features_to = self.feature_names( - force_flags=native, macros=[("DETECT_FEATURES", 1)] - ) - elif TOK == "MAX": - features_to = self.feature_supported.keys() - elif TOK == "MIN": - features_to = self.feature_min - else: - if TOK in self.feature_supported: - features_to.add(TOK) - else: - if not self.feature_is_exist(TOK): - self.dist_fatal(arg_name, - ", '%s' isn't a known feature or option" % tok - ) - if append: - final_features = final_features.union(features_to) - else: - final_features = final_features.difference(features_to) - - append = True # back to default - - return final_features - - _parse_regex_target = re.compile(r'\s|[*,/]|([()])') - def 
_parse_target_tokens(self, tokens): - assert(isinstance(tokens, str)) - final_targets = [] # to keep it sorted as specified - extra_flags = [] - has_baseline = False - - skipped = set() - policies = set() - multi_target = None - - tokens = list(filter(None, re.split(self._parse_regex_target, tokens))) - if not tokens: - self.dist_fatal("expected one token at least") - - for tok in tokens: - TOK = tok.upper() - ch = tok[0] - if ch in ('+', '-'): - self.dist_fatal( - "+/- are 'not' allowed from target's groups or @targets, " - "only from cpu_baseline and cpu_dispatch parms" - ) - elif ch == '$': - if multi_target is not None: - self.dist_fatal( - "policies aren't allowed inside multi-target '()'" - ", only CPU features" - ) - policies.add(self._parse_token_policy(TOK)) - elif ch == '#': - if multi_target is not None: - self.dist_fatal( - "target groups aren't allowed inside multi-target '()'" - ", only CPU features" - ) - has_baseline, final_targets, extra_flags = \ - self._parse_token_group(TOK, has_baseline, final_targets, extra_flags) - elif ch == '(': - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - multi_target = set() - elif ch == ')': - if multi_target is None: - self.dist_fatal("multi-target opener '(' wasn't found") - targets = self._parse_multi_target(multi_target) - if targets is None: - skipped.add(tuple(multi_target)) - else: - if len(targets) == 1: - targets = targets[0] - if targets and targets not in final_targets: - final_targets.append(targets) - multi_target = None # back to default - else: - if TOK == "BASELINE": - if multi_target is not None: - self.dist_fatal("baseline isn't allowed inside multi-target '()'") - has_baseline = True - continue - - if multi_target is not None: - multi_target.add(TOK) - continue - - if not self.feature_is_exist(TOK): - self.dist_fatal("invalid target name '%s'" % TOK) - - is_enabled = ( - TOK in self.parse_baseline_names or - TOK in self.parse_dispatch_names - ) - if is_enabled: - if TOK not in final_targets: - final_targets.append(TOK) - continue - - skipped.add(TOK) - - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - if skipped: - self.dist_log( - "skip targets", skipped, - "not part of baseline or dispatch-able features" - ) - - final_targets = self.feature_untied(final_targets) - - # add polices dependencies - for p in list(policies): - _, _, deps = self._parse_policies[p] - for d in deps: - if d in policies: - continue - self.dist_log( - "policy '%s' force enables '%s'" % ( - p, d - )) - policies.add(d) - - # release policies filtrations - for p, (have, nhave, _) in self._parse_policies.items(): - func = None - if p in policies: - func = have - self.dist_log("policy '%s' is ON" % p) - else: - func = nhave - if not func: - continue - has_baseline, final_targets, extra_flags = func( - has_baseline, final_targets, extra_flags - ) - - return has_baseline, final_targets, extra_flags - - def _parse_token_policy(self, token): - """validate policy token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'$' must stuck in the begin of policy name") - token = token[1:] - if token not in self._parse_policies: - self.dist_fatal( - "'%s' is an invalid policy name, available policies are" % token, - self._parse_policies.keys() - ) - return token - - def _parse_token_group(self, token, has_baseline, final_targets, extra_flags): - """validate group token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'#' must stuck in the begin of 
group name") - - token = token[1:] - ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get( - token, (False, None, []) - ) - if gtargets is None: - self.dist_fatal( - "'%s' is an invalid target group name, " % token + \ - "available target groups are", - self.parse_target_groups.keys() - ) - if ghas_baseline: - has_baseline = True - # always keep sorting as specified - final_targets += [f for f in gtargets if f not in final_targets] - extra_flags += [f for f in gextra_flags if f not in extra_flags] - return has_baseline, final_targets, extra_flags - - def _parse_multi_target(self, targets): - """validate multi targets that defined between parentheses()""" - # remove any implied features and keep the origins - if not targets: - self.dist_fatal("empty multi-target '()'") - if not all([ - self.feature_is_exist(tar) for tar in targets - ]) : - self.dist_fatal("invalid target name in multi-target", targets) - if not all([ - ( - tar in self.parse_baseline_names or - tar in self.parse_dispatch_names - ) - for tar in targets - ]) : - return None - targets = self.feature_ahead(targets) - if not targets: - return None - # force sort multi targets, so it can be comparable - targets = self.feature_sorted(targets) - targets = tuple(targets) # hashable - return targets - - def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags): - """skip all baseline features""" - skipped = [] - for tar in final_targets[:]: - is_base = False - if isinstance(tar, str): - is_base = tar in self.parse_baseline_names - else: - # multi targets - is_base = all([ - f in self.parse_baseline_names - for f in tar - ]) - if is_base: - skipped.append(tar) - final_targets.remove(tar) - - if skipped: - self.dist_log("skip baseline features", skipped) - - return has_baseline, final_targets, extra_flags - - def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags): - """leave a notice that $keep_sort is on""" - self.dist_log( - "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n" - "are 'not' sorted depend on the highest interest but" - "as specified in the dispatch-able source or the extra group" - ) - return has_baseline, final_targets, extra_flags - - def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags): - """sorted depend on the highest interest""" - final_targets = self.feature_sorted(final_targets, reverse=True) - return has_baseline, final_targets, extra_flags - - def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags): - """append the compiler optimization flags""" - if self.cc_has_debug: - self.dist_log("debug mode is detected, policy 'maxopt' is skipped.") - elif self.cc_noopt: - self.dist_log("optimization is disabled, policy 'maxopt' is skipped.") - else: - flags = self.cc_flags["opt"] - if not flags: - self.dist_log( - "current compiler doesn't support optimization flags, " - "policy 'maxopt' is skipped", stderr=True - ) - else: - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_werror(self, has_baseline, final_targets, extra_flags): - """force warnings to treated as errors""" - flags = self.cc_flags["werror"] - if not flags: - self.dist_log( - "current compiler doesn't support werror flags, " - "warnings will 'not' treated as errors", stderr=True - ) - else: - self.dist_log("compiler warnings are treated as errors") - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags): 
- """skip features that has no auto-vectorized support by compiler""" - skipped = [] - for tar in final_targets[:]: - if isinstance(tar, str): - can = self.feature_can_autovec(tar) - else: # multiple target - can = all([ - self.feature_can_autovec(t) - for t in tar - ]) - if not can: - final_targets.remove(tar) - skipped.append(tar) - - if skipped: - self.dist_log("skip non auto-vectorized features", skipped) - - return has_baseline, final_targets, extra_flags - -class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse): - """ - A helper class for `CCompiler` aims to provide extra build options - to effectively control of compiler optimizations that are directly - related to CPU features. - """ - def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None): - _Config.__init__(self) - _Distutils.__init__(self, ccompiler) - _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch) - _CCompiler.__init__(self) - _Feature.__init__(self) - if not self.cc_noopt and self.cc_has_native: - self.dist_log( - "native flag is specified through environment variables. " - "force cpu-baseline='native'" - ) - cpu_baseline = "native" - _Parse.__init__(self, cpu_baseline, cpu_dispatch) - # keep the requested features untouched, need it later for report - # and trace purposes - self._requested_baseline = cpu_baseline - self._requested_dispatch = cpu_dispatch - # key is the dispatch-able source and value is a tuple - # contains two items (has_baseline[boolean], dispatched-features[list]) - self.sources_status = getattr(self, "sources_status", {}) - # every instance should has a separate one - self.cache_private.add("sources_status") - # set it at the end to make sure the cache writing was done after init - # this class - self.hit_cache = hasattr(self, "hit_cache") - - def is_cached(self): - """ - Returns True if the class loaded from the cache file - """ - return self.cache_infile and self.hit_cache - - def cpu_baseline_flags(self): - """ - Returns a list of final CPU baseline compiler flags - """ - return self.parse_baseline_flags - - def cpu_baseline_names(self): - """ - return a list of final CPU baseline feature names - """ - return self.parse_baseline_names - - def cpu_dispatch_names(self): - """ - return a list of final CPU dispatch feature names - """ - return self.parse_dispatch_names - - def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): - """ - Compile one or more dispatch-able sources and generates object files, - also generates abstract C config headers and macros that - used later for the final runtime dispatching process. - - The mechanism behind it is to takes each source file that specified - in 'sources' and branching it into several files depend on - special configuration statements that must be declared in the - top of each source which contains targeted CPU features, - then it compiles every branched source with the proper compiler flags. - - Parameters - ---------- - sources : list - Must be a list of dispatch-able sources file paths, - and configuration statements must be declared inside - each file. - - src_dir : str - Path of parent directory for the generated headers and wrapped sources. - If None(default) the files will generated in-place. - - ccompiler : CCompiler - Distutils `CCompiler` instance to be used for compilation. - If None (default), the provided instance during the initialization - will be used instead. 
- - **kwargs : any - Arguments to pass on to the `CCompiler.compile()` - - Returns - ------- - list : generated object files - - Raises - ------ - CompileError - Raises by `CCompiler.compile()` on compiling failure. - DistutilsError - Some errors during checking the sanity of configuration statements. - - See Also - -------- - parse_targets : - Parsing the configuration statements of dispatch-able sources. - """ - to_compile = {} - baseline_flags = self.cpu_baseline_flags() - include_dirs = kwargs.setdefault("include_dirs", []) - - for src in sources: - output_dir = os.path.dirname(src) - if src_dir: - if not output_dir.startswith(src_dir): - output_dir = os.path.join(src_dir, output_dir) - if output_dir not in include_dirs: - # To allow including the generated config header(*.dispatch.h) - # by the dispatch-able sources - include_dirs.append(output_dir) - - has_baseline, targets, extra_flags = self.parse_targets(src) - nochange = self._generate_config(output_dir, src, targets, has_baseline) - for tar in targets: - tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange) - flags = tuple(extra_flags + self.feature_flags(tar)) - to_compile.setdefault(flags, []).append(tar_src) - - if has_baseline: - flags = tuple(extra_flags + baseline_flags) - to_compile.setdefault(flags, []).append(src) - - self.sources_status[src] = (has_baseline, targets) - - # For these reasons, the sources are compiled in a separate loop: - # - Gathering all sources with the same flags to benefit from - # the parallel compiling as much as possible. - # - To generate all config headers of the dispatchable sources, - # before the compilation in case if there are dependency relationships - # among them. - objects = [] - for flags, srcs in to_compile.items(): - objects += self.dist_compile( - srcs, list(flags), ccompiler=ccompiler, **kwargs - ) - return objects - - def generate_dispatch_header(self, header_path): - """ - Generate the dispatch header which contains the #definitions and headers - for platform-specific instruction-sets for the enabled CPU baseline and - dispatch-able features. - - Its highly recommended to take a look at the generated header - also the generated source files via `try_dispatch()` - in order to get the full picture. - """ - self.dist_log("generate CPU dispatch header: (%s)" % header_path) - - baseline_names = self.cpu_baseline_names() - dispatch_names = self.cpu_dispatch_names() - baseline_len = len(baseline_names) - dispatch_len = len(dispatch_names) - - header_dir = os.path.dirname(header_path) - if not os.path.exists(header_dir): - self.dist_log( - f"dispatch header dir {header_dir} does not exist, creating it", - stderr=True - ) - os.makedirs(header_dir) - - with open(header_path, 'w') as f: - baseline_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in baseline_names - ]) - dispatch_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in dispatch_names - ]) - f.write(textwrap.dedent("""\ - /* - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator (distutils/ccompiler_opt.py) - */ - #define {pfx}WITH_CPU_BASELINE "{baseline_str}" - #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}" - #define {pfx}WITH_CPU_BASELINE_N {baseline_len} - #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len} - #define {pfx}WITH_CPU_EXPAND_(X) X - #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) 
- \\
-            {baseline_calls}
-            #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\
-            {dispatch_calls}
-            """).format(
-                pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names),
-                dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len,
-                dispatch_len=dispatch_len, baseline_calls=baseline_calls,
-                dispatch_calls=dispatch_calls
-            ))
-            baseline_pre = ''
-            for name in baseline_names:
-                baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n'
-
-            dispatch_pre = ''
-            for name in dispatch_names:
-                dispatch_pre += textwrap.dedent("""\
-                #ifdef {pfx}CPU_TARGET_{name}
-                {pre}
-                #endif /*{pfx}CPU_TARGET_{name}*/
-                """).format(
-                    pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor(
-                        name, tabs=1
-                    ))
-
-            f.write(textwrap.dedent("""\
-            /******* baseline features *******/
-            {baseline_pre}
-            /******* dispatch features *******/
-            {dispatch_pre}
-            """).format(
-                pfx=self.conf_c_prefix_, baseline_pre=baseline_pre,
-                dispatch_pre=dispatch_pre
-            ))
-
-    def report(self, full=False):
-        report = []
-        platform_rows = []
-        baseline_rows = []
-        dispatch_rows = []
-        report.append(("Platform", platform_rows))
-        report.append(("", ""))
-        report.append(("CPU baseline", baseline_rows))
-        report.append(("", ""))
-        report.append(("CPU dispatch", dispatch_rows))
-
-        ########## platform ##########
-        platform_rows.append(("Architecture", (
-            "unsupported" if self.cc_on_noarch else self.cc_march)
-        ))
-        platform_rows.append(("Compiler", (
-            "unix-like" if self.cc_is_nocc else self.cc_name)
-        ))
-        ########## baseline ##########
-        if self.cc_noopt:
-            baseline_rows.append(("Requested", "optimization disabled"))
-        else:
-            baseline_rows.append(("Requested", repr(self._requested_baseline)))
-
-        baseline_names = self.cpu_baseline_names()
-        baseline_rows.append((
-            "Enabled", (' '.join(baseline_names) if baseline_names else "none")
-        ))
-        baseline_flags = self.cpu_baseline_flags()
-        baseline_rows.append((
-            "Flags", (' '.join(baseline_flags) if baseline_flags else "none")
-        ))
-        extra_checks = []
-        for name in baseline_names:
-            extra_checks += self.feature_extra_checks(name)
-        baseline_rows.append((
-            "Extra checks", (' '.join(extra_checks) if extra_checks else "none")
-        ))
-
-        ########## dispatch ##########
-        if self.cc_noopt:
-            dispatch_rows.append(("Requested", "optimization disabled"))
-        else:
-            dispatch_rows.append(("Requested", repr(self._requested_dispatch)))
-
-        dispatch_names = self.cpu_dispatch_names()
-        dispatch_rows.append((
-            "Enabled", (' '.join(dispatch_names) if dispatch_names else "none")
-        ))
-        ########## Generated ##########
-        # TODO:
-        # - collect object names from 'try_dispatch()'
-        #   then get size of each object and printed
-        # - give more details about the features that not
-        #   generated due compiler support
-        # - find a better output's design.
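The "Generated" rows assembled just below come from inverting `sources_status`, which `try_dispatch()` fills with one `(has_baseline, targets)` entry per dispatch-able source. A minimal stand-alone sketch of that inversion, with hypothetical file and target names:

    # sources_status maps each dispatch-able source to its resolved targets;
    # the report flips it into {target: [sources]} for the summary rows.
    sources_status = {
        "umath.dispatch.c": (True, ["AVX2", ("AVX512F", "AVX512_SKX")]),
        "sort.dispatch.c": (True, ["AVX2"]),
    }
    target_sources = {}
    for source, (_, targets) in sources_status.items():
        for tar in targets:
            target_sources.setdefault(tar, []).append(source)
    # -> {"AVX2": ["umath.dispatch.c", "sort.dispatch.c"],
    #     ("AVX512F", "AVX512_SKX"): ["umath.dispatch.c"]}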
- # - target_sources = {} - for source, (_, targets) in self.sources_status.items(): - for tar in targets: - target_sources.setdefault(tar, []).append(source) - - if not full or not target_sources: - generated = "" - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - generated += name + "[%d] " % len(sources) - dispatch_rows.append(("Generated", generated[:-1] if generated else "none")) - else: - dispatch_rows.append(("Generated", '')) - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - flags = ' '.join(self.feature_flags(tar)) - implies = ' '.join(self.feature_sorted(self.feature_implies(tar))) - detect = ' '.join(self.feature_detect(tar)) - extra_checks = [] - for name in ((tar,) if isinstance(tar, str) else tar): - extra_checks += self.feature_extra_checks(name) - extra_checks = (' '.join(extra_checks) if extra_checks else "none") - - dispatch_rows.append(('', '')) - dispatch_rows.append((pretty_name, implies)) - dispatch_rows.append(("Flags", flags)) - dispatch_rows.append(("Extra checks", extra_checks)) - dispatch_rows.append(("Detect", detect)) - for src in sources: - dispatch_rows.append(("", src)) - - ############################### - # TODO: add support for 'markdown' format - text = [] - secs_len = [len(secs) for secs, _ in report] - cols_len = [len(col) for _, rows in report for col, _ in rows] - tab = ' ' * 2 - pad = max(max(secs_len), max(cols_len)) - for sec, rows in report: - if not sec: - text.append("") # empty line - continue - sec += ' ' * (pad - len(sec)) - text.append(sec + tab + ': ') - for col, val in rows: - col += ' ' * (pad - len(col)) - text.append(tab + col + ': ' + val) - - return '\n'.join(text) - - def _wrap_target(self, output_dir, dispatch_src, target, nochange=False): - assert(isinstance(target, (str, tuple))) - if isinstance(target, str): - ext_name = target_name = target - else: - # multi-target - ext_name = '.'.join(target) - target_name = '__'.join(target) - - wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src)) - wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower()) - if nochange and os.path.exists(wrap_path): - return wrap_path - - self.dist_log("wrap dispatch-able target -> ", wrap_path) - # sorting for readability - features = self.feature_sorted(self.feature_implies_c(target)) - target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_ - target_defs = [target_join + f for f in features] - target_defs = '\n'.join(target_defs) - - with open(wrap_path, "w") as fd: - fd.write(textwrap.dedent("""\ - /** - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - */ - #define {pfx}CPU_TARGET_MODE - #define {pfx}CPU_TARGET_CURRENT {target_name} - {target_defs} - #include "{path}" - """).format( - pfx=self.conf_c_prefix_, target_name=target_name, - path=os.path.abspath(dispatch_src), target_defs=target_defs - )) - return wrap_path - - def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False): - config_path = os.path.basename(dispatch_src) - config_path = os.path.splitext(config_path)[0] + '.h' - config_path = os.path.join(output_dir, config_path) - # check if targets didn't change to avoid recompiling - cache_hash = self.cache_hash(targets, has_baseline) - try: - with open(config_path) as f: - last_hash = f.readline().split("cache_hash:") - if 
len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
-                    return True
-        except OSError:
-            pass
-
-        os.makedirs(os.path.dirname(config_path), exist_ok=True)
-
-        self.dist_log("generate dispatched config -> ", config_path)
-        dispatch_calls = []
-        for tar in targets:
-            if isinstance(tar, str):
-                target_name = tar
-            else: # multi target
-                target_name = '__'.join([t for t in tar])
-            req_detect = self.feature_detect(tar)
-            req_detect = '&&'.join([
-                "CHK(%s)" % f for f in req_detect
-            ])
-            dispatch_calls.append(
-                "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % (
-                self.conf_c_prefix_, req_detect, target_name
-            ))
-        dispatch_calls = ' \\\n'.join(dispatch_calls)
-
-        if has_baseline:
-            baseline_calls = (
-                "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))"
-            ) % self.conf_c_prefix_
-        else:
-            baseline_calls = ''
-
-        with open(config_path, "w") as fd:
-            fd.write(textwrap.dedent("""\
-            // cache_hash:{cache_hash}
-            /**
-             * AUTOGENERATED DON'T EDIT
-             * Please make changes to the code generator (distutils/ccompiler_opt.py)
-             */
-            #ifndef {pfx}CPU_DISPATCH_EXPAND_
-                #define {pfx}CPU_DISPATCH_EXPAND_(X) X
-            #endif
-            #undef {pfx}CPU_DISPATCH_BASELINE_CALL
-            #undef {pfx}CPU_DISPATCH_CALL
-            #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\
-            {baseline_calls}
-            #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\
-            {dispatch_calls}
-            """).format(
-                pfx=self.conf_c_prefix_, baseline_calls=baseline_calls,
-                dispatch_calls=dispatch_calls, cache_hash=cache_hash
-            ))
-        return False
-
-def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs):
-    """
-    Create a new instance of 'CCompilerOpt' and generate the dispatch header
-    which contains the #definitions and headers of platform-specific instruction-sets for
-    the enabled CPU baseline and dispatch-able features.
-
-    Parameters
-    ----------
-    compiler : CCompiler instance
-    dispatch_hpath : str
-        path of the dispatch header
-
-    **kwargs: passed as-is to `CCompilerOpt(...)`
-    Returns
-    -------
-    new instance of CCompilerOpt
-    """
-    opt = CCompilerOpt(compiler, **kwargs)
-    if not os.path.exists(dispatch_hpath) or not opt.is_cached():
-        opt.generate_dispatch_header(dispatch_hpath)
-    return opt
diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py
deleted file mode 100644
index 3ba501de03b6..000000000000
--- a/numpy/distutils/command/__init__.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""distutils.command
-
-Package containing implementation of all the standard Distutils
-commands.
-
-"""
-__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
-
-distutils_all = [ #'build_py',
-                  'clean',
-                  'install_clib',
-                  'install_scripts',
-                  'bdist',
-                  'bdist_dumb',
-                  'bdist_wininst',
-                  ]
-
-__import__('distutils.command', globals(), locals(), distutils_all)
-
-__all__ = ['build',
-           'config_compiler',
-           'config',
-           'build_src',
-           'build_py',
-           'build_ext',
-           'build_clib',
-           'build_scripts',
-           'install',
-           'install_data',
-           'install_headers',
-           'install_lib',
-           'bdist_rpm',
-           'sdist',
-           ] + distutils_all
diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py
deleted file mode 100644
index b72d0cab1a7d..000000000000
--- a/numpy/distutils/command/autodist.py
+++ /dev/null
@@ -1,148 +0,0 @@
-"""This module implements additional tests ala autoconf which can be useful.
- -""" -import textwrap - -# We put them here since they could be easily reused outside numpy.distutils - -def check_inline(cmd): - """Return the inline identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - #ifndef __cplusplus - static %(inline)s int static_func (void) - { - return 0; - } - %(inline)s int nostatic_func (void) - { - return 0; - } - #endif""") - - for kw in ['inline', '__inline__', '__inline']: - st = cmd.try_compile(body % {'inline': kw}, None, None) - if st: - return kw - - return '' - - -def check_restrict(cmd): - """Return the restrict identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - static int static_func (char * %(restrict)s a) - { - return 0; - } - """) - - for kw in ['restrict', '__restrict__', '__restrict']: - st = cmd.try_compile(body % {'restrict': kw}, None, None) - if st: - return kw - - return '' - - -def check_compiler_gcc(cmd): - """Check if the compiler is GCC.""" - - cmd._check_compiler() - body = textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) - #error gcc required - #endif - return 0; - } - """) - return cmd.try_compile(body, None, None) - - -def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0): - """ - Check that the gcc version is at least the specified version.""" - - cmd._check_compiler() - version = '.'.join([str(major), str(minor), str(patchlevel)]) - body = textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\ - (__GNUC_MINOR__ < %(minor)d) || \\ - (__GNUC_PATCHLEVEL__ < %(patchlevel)d) - #error gcc >= %(version)s required - #endif - return 0; - } - """) - kw = {'version': version, 'major': major, 'minor': minor, - 'patchlevel': patchlevel} - - return cmd.try_compile(body % kw, None, None) - - -def check_gcc_function_attribute(cmd, attribute, name): - """Return True if the given function attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s %s(void* unused) - { - return 0; - } - - int - main() - { - return 0; - } - """) % (attribute, name) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, - include): - """Return True if the given function attribute is supported with - intrinsics.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #include<%s> - int %s %s(void) - { - %s; - return 0; - } - - int - main() - { - return 0; - } - """) % (include, attribute, name, code) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_variable_attribute(cmd, attribute): - """Return True if the given variable attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s foo; - - int - main() - { - return 0; - } - """) % (attribute, ) - return cmd.try_compile(body, None, None) != 0 diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 682e7a8eb8e2..000000000000 --- a/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def _make_spec_file(self): - spec_file = 
old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name. - setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py', setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py deleted file mode 100644 index 80830d559c61..000000000000 --- a/numpy/distutils/command/build.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - self.warn_error = False - self.cpu_baseline = "min" - self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default - self.disable_optimization = False - """ - the '_simd' module is a very large. Adding more dispatched features - will increase binary size and compile time. By default we minimize - the targeted features to those most commonly used by the NumPy SIMD interface(NPYV), - NOTE: any specified features will be ignored if they're: - - part of the baseline(--cpu-baseline) - - not part of dispatch-able features(--cpu-dispatch) - - not supported by compiler or platform - """ - self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \ - "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2" - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) - - def run(self): - old_build.run(self) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py deleted file mode 100644 index 26e2f4ed0f4a..000000000000 --- a/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,469 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. 
-""" -import os -from glob import glob -import shutil -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import ( - filter_sources, get_lib_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.ccompiler_opt import new_ccompiler_opt - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] -# - - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('inplace', 'i', 'Build in-place'), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ] - - boolean_options = old_build_clib.boolean_options + \ - ['inplace', 'warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - self.inplace = 0 - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - old_build_clib.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization') - ) - - def have_f_sources(self): - for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources', [])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources', [])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. - languages = [] - - # Make sure that extension sources are complete. 
- self.run_command('build_src') - - for (lib_name, build_info) in self.libraries: - l = build_info.get('language', None) - if l and l not in languages: - languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread without - # giving the chance the previous instances of `CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### CLIB COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self._f_compiler is not None: - self._f_compiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self._f_compiler.customize_cmd(self) - self.libraries = libraries - - self._f_compiler.show_customization() - else: - self._f_compiler = None - - self.build_libraries(self.libraries) - - if self.inplace: - for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) - source = os.path.join(self.build_clib, libname) - target = os.path.join(l.target_dir, libname) - self.mkpath(l.target_dir) - shutil.copy(source, target) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def assemble_flags(self, in_flags): - """ Assemble flags from flag list - - Parameters - ---------- - in_flags : None or sequence - None corresponds to empty list. Sequence elements can be strings - or callables that return lists of strings. Callable takes `self` as - single parameter. 
- - Returns - ------- - out_flags : list - """ - if in_flags is None: - return [] - out_flags = [] - for in_flag in in_flags: - if callable(in_flag): - out_flags += in_flag(self) - else: - out_flags.append(in_flag) - return out_flags - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - fcompiler = self._f_compiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError(("in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames") % lib_name) - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language', 'c') == 'f90' - - # save source type information so that build_ext can use it. - source_languages = [] - if c_sources: - source_languages.append('c') - if cxx_sources: - source_languages.append('c++') - if requiref90: - source_languages.append('f90') - elif f_sources: - source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends', []) - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc', {}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script ' - 'for fortran compiler: %s' - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources" - " but no Fortran compiler found" % (lib_name)) - - if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get( - 'extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get( - 'extra_f90_compile_args') or [] - - macros = build_info.get('macros') - if macros is None: - macros = [] - include_dirs = build_info.get('include_dirs') - if include_dirs is None: - include_dirs = [] - # Flags can be strings, or callables that return a list of strings. 
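A small illustration of `assemble_flags()` (defined above) resolving a callable entry; the helper and flag values are invented:

    def _arch_flags(cmd):
        # resolved late, once the final compiler and debug mode are known
        return ["-g3"] if cmd.debug else ["-O3"]

    build_info = {"extra_compiler_args": ["-fno-strict-aliasing", _arch_flags]}
    # inside build_a_library():
    #     extra_postargs = self.assemble_flags(build_info.get("extra_compiler_args"))
    # yields ["-fno-strict-aliasing", "-g3"] for debug builds.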
-        extra_postargs = self.assemble_flags(
-            build_info.get('extra_compiler_args'))
-        extra_cflags = self.assemble_flags(
-            build_info.get('extra_cflags'))
-        extra_cxxflags = self.assemble_flags(
-            build_info.get('extra_cxxflags'))
-
-        include_dirs.extend(get_numpy_include_dirs())
-        # where compiled F90 module files are:
-        module_dirs = build_info.get('module_dirs') or []
-        module_build_dir = os.path.dirname(lib_file)
-        if requiref90:
-            self.mkpath(module_build_dir)
-
-        if compiler.compiler_type == 'msvc':
-            # this hack works around the msvc compiler attributes
-            # problem, msvc uses its own convention :(
-            c_sources += cxx_sources
-            cxx_sources = []
-            extra_cflags += extra_cxxflags
-
-        # filtering C dispatch-table sources when optimization is not disabled,
-        # otherwise treated as normal sources.
-        copt_c_sources = []
-        copt_cxx_sources = []
-        copt_baseline_flags = []
-        copt_macros = []
-        if not self.disable_optimization:
-            bsrc_dir = self.get_finalized_command("build_src").build_src
-            dispatch_hpath = os.path.join("numpy", "distutils", "include")
-            dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
-            include_dirs.append(dispatch_hpath)
-            # copt_build_src = None if self.inplace else bsrc_dir
-            copt_build_src = bsrc_dir
-            for _srcs, _dst, _ext in (
-                ((c_sources,), copt_c_sources, ('.dispatch.c',)),
-                ((c_sources, cxx_sources), copt_cxx_sources,
-                    ('.dispatch.cpp', '.dispatch.cxx'))
-            ):
-                for _src in _srcs:
-                    _dst += [
-                        _src.pop(_src.index(s))
-                        for s in _src[:] if s.endswith(_ext)
-                    ]
-            copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
-        else:
-            copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
-
-        objects = []
-        if copt_cxx_sources:
-            log.info("compiling C++ dispatch-able sources")
-            objects += self.compiler_opt.try_dispatch(
-                copt_cxx_sources,
-                output_dir=self.build_temp,
-                src_dir=copt_build_src,
-                macros=macros + copt_macros,
-                include_dirs=include_dirs,
-                debug=self.debug,
-                extra_postargs=extra_postargs + extra_cxxflags,
-                ccompiler=cxx_compiler
-            )
-
-        if copt_c_sources:
-            log.info("compiling C dispatch-able sources")
-            objects += self.compiler_opt.try_dispatch(
-                copt_c_sources,
-                output_dir=self.build_temp,
-                src_dir=copt_build_src,
-                macros=macros + copt_macros,
-                include_dirs=include_dirs,
-                debug=self.debug,
-                extra_postargs=extra_postargs + extra_cflags)
-
-        if c_sources:
-            log.info("compiling C sources")
-            objects += compiler.compile(
-                c_sources,
-                output_dir=self.build_temp,
-                macros=macros + copt_macros,
-                include_dirs=include_dirs,
-                debug=self.debug,
-                extra_postargs=(extra_postargs +
-                                copt_baseline_flags +
-                                extra_cflags))
-
-        if cxx_sources:
-            log.info("compiling C++ sources")
-            cxx_compiler = compiler.cxx_compiler()
-            cxx_objects = cxx_compiler.compile(
-                cxx_sources,
-                output_dir=self.build_temp,
-                macros=macros + copt_macros,
-                include_dirs=include_dirs,
-                debug=self.debug,
-                extra_postargs=(extra_postargs +
-                                copt_baseline_flags +
-                                extra_cxxflags))
-            objects.extend(cxx_objects)
-
-        if f_sources or fmodule_sources:
-            extra_postargs = []
-            f_objects = []
-
-            if requiref90:
-                if fcompiler.module_dir_switch is None:
-                    existing_modules = glob('*.mod')
-                extra_postargs += fcompiler.module_options(
-                    module_dirs, module_build_dir)
-
-            if fmodule_sources:
-                log.info("compiling Fortran 90 module sources")
-                f_objects += fcompiler.compile(fmodule_sources,
-                                               output_dir=self.build_temp,
-                                               macros=macros,
-                                               include_dirs=include_dirs,
-                                               debug=self.debug,
-                                               extra_postargs=extra_postargs)
-
-            if requiref90 and self._f_compiler.module_dir_switch is None:
-                # move
new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - else: - f_objects = [] - - if f_objects and not fcompiler.can_ccompiler_link(compiler): - # Default linker cannot link Fortran object files, and results - # need to be wrapped later. Instead of creating a real static - # library, just keep track of the object files. - listfn = os.path.join(self.build_clib, - lib_name + '.fobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) - - listfn = os.path.join(self.build_clib, - lib_name + '.cobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in objects)) - - # create empty "library" file for dependency tracking - lib_fname = os.path.join(self.build_clib, - lib_name + compiler.static_lib_extension) - with open(lib_fname, 'wb') as f: - pass - else: - # assume that default linker is suitable for - # linking Fortran object files - objects.extend(f_objects) - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries', []) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo.get('libraries', [])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py deleted file mode 100644 index 42137e5f859d..000000000000 --- a/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,752 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. 
- -""" -import os -import subprocess -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.system_info import combine_paths -from numpy.distutils.misc_util import ( - filter_sources, get_ext_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.command.config_compiler import show_fortran_compilers -from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - self.simd_test = None - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - - # Ensure that self.include_dirs and self.distribution.include_dirs - # refer to the same list object. finalize_options will modify - # self.include_dirs, but self.distribution.include_dirs is used - # during the actual build. - # self.include_dirs is None unless paths are specified with - # --include-dirs. - # The include paths will be passed to the compiler in the order: - # numpy paths, --include-dirs paths, Python include path. - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - incl_dirs = self.include_dirs or [] - if self.distribution.include_dirs is None: - self.distribution.include_dirs = [] - self.include_dirs = self.distribution.include_dirs - self.include_dirs.extend(incl_dirs) - - old_build_ext.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization'), - ('simd_test', 'simd_test') - ) - CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. 
- self.run_command('build_src') - - if self.distribution.has_c_libraries(): - if self.inplace: - if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj( - 'build_clib') - else: - build_clib = self.distribution.get_command_obj( - 'build_clib') - build_clib.inplace = 1 - build_clib.ensure_finalized() - build_clib.run() - self.distribution.have_run['build_clib'] = 1 - - else: - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. Extensions must - # explicitly specify the C libraries that they use. - - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread without - # giving the chance the previous instances of `CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### EXT COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - # Setup directory for storing generated extra DLL files on Windows - self.extra_dll_dir = os.path.join(self.build_temp, '.libs') - if not os.path.isdir(self.extra_dll_dir): - os.makedirs(self.extra_dll_dir) - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname, build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,' - ' overwriting build_info\n%s... \nwith\n%s...' - % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname, build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. - # Update extension libraries, library_dirs, and macros. 
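To make the precedence rule in the `clibs` mapping above concrete, a toy version with an invented library name:

    clibs = {}
    # libraries built by build_clib are registered first ...
    for libname, binfo in [("fftpack_lite", {"libraries": ["m"]})]:
        clibs[libname] = binfo
    # ... and same-named entries from distribution.libraries are then skipped,
    # so the build_clib definition wins.
    for libname, binfo in [("fftpack_lite", {"libraries": []})]:
        if libname in clibs:
            continue
        clibs[libname] = binfo
    assert clibs["fftpack_lite"]["libraries"] == ["m"]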
- all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries', []) - c_lib_dirs += binfo.get('library_dirs', []) - for m in binfo.get('macros', []): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname, {}).get('source_languages', []): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - - # reset language attribute for choosing proper linker - # - # When we build extensions with multiple languages, we have to - # choose a linker. The rules here are: - # 1. if there is Fortran code, always prefer the Fortran linker, - # 2. otherwise prefer C++ over C, - # 3. Users can force a particular linker by using - # `language='c'` # or 'c++', 'f90', 'f77' - # in their config.add_extension() calls. - if 'c++' in ext_languages: - ext_language = 'c++' - else: - ext_language = 'c' # default - - has_fortran = False - if 'f90' in ext_languages: - ext_language = 'f90' - has_fortran = True - elif 'f77' in ext_languages: - ext_language = 'f77' - has_fortran = True - - if not ext.language or has_fortran: - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' % - (ext.name, l, ext_language)) - - ext.language = ext_language - - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution, need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' 
% - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler=self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' % - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - # Copy over any extra DLL files - # FIXME: In the case where there are more than two packages, - # we blindly assume that both packages need all of the libraries, - # resulting in a larger wheel than is required. This should be fixed, - # but it's so rare that I won't bother to handle it. - pkg_roots = { - self.get_ext_fullname(ext.name).split('.')[0] - for ext in self.extensions - } - for pkg_root in pkg_roots: - shared_lib_dir = os.path.join(pkg_root, '.libs') - if not self.inplace: - shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir) - for fn in os.listdir(self.extra_dll_dir): - if not os.path.isdir(shared_lib_dir): - os.makedirs(shared_lib_dir) - if not fn.lower().endswith('.dll'): - continue - runtime_lib = os.path.join(self.extra_dll_dir, fn) - copy_file(runtime_lib, shared_lib_dir) - - def swig_sources(self, sources, extensions=None): - # Do nothing. Swig sources have been handled in build_src command. - return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " - "'sources' must be present and must be " - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - extra_cflags = getattr(ext, 'extra_c_compile_args', None) or [] - extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or [] - - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - if self.compiler.compiler_type == 'msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. 
- extra_args.append('/Zm1000') - extra_cflags += extra_cxxflags - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. - if ext.language == 'f90': - fcompiler = self._f90_compiler - elif ext.language == 'f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( - ext, 'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( - ext, 'extra_f90_compile_args') else [] - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" - "but no C++ compiler found" % (ext.name)) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " - "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77', 'f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language == 'c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends': ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - # filtering C dispatch-table sources when optimization is not disabled, - # otherwise treated as normal sources. - copt_c_sources = [] - copt_cxx_sources = [] - copt_baseline_flags = [] - copt_macros = [] - if not self.disable_optimization: - bsrc_dir = self.get_finalized_command("build_src").build_src - dispatch_hpath = os.path.join("numpy", "distutils", "include") - dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) - include_dirs.append(dispatch_hpath) - - # copt_build_src = None if self.inplace else bsrc_dir - # Always generate the generated config files and - # dispatch-able sources inside the build directory, - # even if the build option `inplace` is enabled. - # This approach prevents conflicts with Meson-generated - # config headers. Since `spin build --clean` will not remove - # these headers, they might overwrite the generated Meson headers, - # causing compatibility issues. Maintaining separate directories - # ensures compatibility between distutils dispatch config headers - # and Meson headers, avoiding build disruptions. - # See gh-24450 for more details. 
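The loop just below pulls `*.dispatch.c` / `*.dispatch.cpp` sources out of the regular source lists so the CPU-dispatch machinery can compile them separately. The same suffix partitioning as a standalone sketch (hypothetical helper name):

```python
def partition_dispatch(sources,
                       exts=(".dispatch.c", ".dispatch.cpp", ".dispatch.cxx")):
    # Split sources into (dispatch-able, regular) by suffix -- what the
    # deleted loop does in place by popping matches out of the list.
    dispatch = [s for s in sources if s.endswith(exts)]
    regular = [s for s in sources if not s.endswith(exts)]
    return dispatch, regular

dispatch, regular = partition_dispatch(
    ["umath.c", "loops.dispatch.c", "simd.dispatch.cpp"])
assert dispatch == ["loops.dispatch.c", "simd.dispatch.cpp"]
assert regular == ["umath.c"]
```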
- copt_build_src = bsrc_dir - for _srcs, _dst, _ext in ( - ((c_sources,), copt_c_sources, ('.dispatch.c',)), - ((c_sources, cxx_sources), copt_cxx_sources, - ('.dispatch.cpp', '.dispatch.cxx')) - ): - for _src in _srcs: - _dst += [ - _src.pop(_src.index(s)) - for s in _src[:] if s.endswith(_ext) - ] - copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() - else: - copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) - - c_objects = [] - if copt_cxx_sources: - log.info("compiling C++ dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_cxx_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cxxflags, - ccompiler=cxx_compiler, - **kws - ) - if copt_c_sources: - log.info("compiling C dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cflags, - **kws) - if c_sources: - log.info("compiling C sources") - c_objects += self.compiler.compile( - c_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cflags), - **kws) - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile( - cxx_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cxxflags), - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - module_dirs = ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp, os.path.dirname( - self.get_ext_filename(fullname))) - - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if f_objects and not fcompiler.can_ccompiler_link(self.compiler): - unlinkable_fobjects = f_objects - objects = c_objects - else: - unlinkable_fobjects = [] - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. 
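Further down, `_process_unlinkable_fobjects` expands "fake" static libraries: plain-text `<lib>.fobjects` / `<lib>.cobjects` files that simply list object paths, used when the C compiler cannot link Fortran objects directly. A minimal sketch of that expansion (assuming one object path per line):

```python
import os

def expand_fake_libs(libraries, library_dirs):
    # Replace fake static libraries with the object files they list,
    # mirroring the deleted _process_unlinkable_fobjects logic.
    fortran_objects, c_objects = [], []
    for lib in libraries[:]:              # copy: entries are removed below
        for libdir in library_dirs:
            fake = os.path.join(libdir, lib + ".fobjects")
            if not os.path.isfile(fake):
                continue
            libraries.remove(lib)         # not a real library after all
            with open(fake) as f:
                fortran_objects += f.read().splitlines()
            with open(os.path.join(libdir, lib + ".cobjects")) as f:
                c_objects += f.read().splitlines()
    return fortran_objects, c_objects
```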
- if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran( - fcompiler, libraries, library_dirs) - if ext.runtime_library_dirs: - # gcc adds RPATH to the link. On windows, copy the dll into - # self.extra_dll_dir instead. - for d in ext.runtime_library_dirs: - for f in glob(d + '/*.dll'): - copy_file(f, self.extra_dll_dir) - ext.runtime_library_dirs = [] - - elif ext.language in ['f77', 'f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language == 'c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if fcompiler is not None: - objects, libraries = self._process_unlinkable_fobjects( - objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects) - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp, - target_lang=ext.language) - - def _add_dummy_mingwex_sym(self, c_sources): - build_src = self.get_finalized_command("build_src").build_src - build_clib = self.get_finalized_command("build_clib").build_clib - objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib( - objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) - - def _process_unlinkable_fobjects(self, objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects): - libraries = list(libraries) - objects = list(objects) - unlinkable_fobjects = list(unlinkable_fobjects) - - # Expand possible fake static libraries to objects; - # make sure to iterate over a copy of the list as - # "fake" libraries will be removed as they are - # encountered - for lib in libraries[:]: - for libdir in library_dirs: - fake_lib = os.path.join(libdir, lib + '.fobjects') - if os.path.isfile(fake_lib): - # Replace fake static library - libraries.remove(lib) - with open(fake_lib) as f: - unlinkable_fobjects.extend(f.read().splitlines()) - - # Expand C objects - c_lib = os.path.join(libdir, lib + '.cobjects') - with open(c_lib) as f: - objects.extend(f.read().splitlines()) - - # Wrap unlinkable objects to a linkable one - if unlinkable_fobjects: - fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects] - wrapped = fcompiler.wrap_unlinkable_objects( - fobjects, output_dir=self.build_temp, - extra_dll_dir=self.extra_dll_dir) - objects.extend(wrapped) - - return objects, libraries - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: - return - - for libname in c_libraries: - if libname.startswith('msvc'): - continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: - continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - fileexists = True - break - if 
fileexists: - continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. - f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - try: - dir = subprocess.check_output(['cygpath', '-w', dir]) - except (OSError, subprocess.CalledProcessError): - pass - else: - dir = filepath_from_subprocess_output(dir) - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files(self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs(self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py deleted file mode 100644 index d30dc5bf42d8..000000000000 --- a/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,31 +0,0 @@ -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def run(self): - build_src = self.get_finalized_command('build_src') - if build_src.py_modules_dict and self.packages is None: - self.packages = list(build_src.py_modules_dict.keys ()) - old_build_py.run(self) - - def find_package_modules(self, package, package_dir): - modules = old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. - build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package, []) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = [_m for _m in self.py_modules if is_string(_m)] - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/numpy/distutils/command/build_scripts.py b/numpy/distutils/command/build_scripts.py deleted file mode 100644 index d5cadb2745fe..000000000000 --- a/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. 
- -""" -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py deleted file mode 100644 index cfcc80caecd6..000000000000 --- a/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,773 +0,0 @@ -""" Build swig and f2py sources. -""" -import os -import re -import sys -import shlex -import copy - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import ( - fortran_ext_match, appendpath, is_string, is_sequence, get_cmd - ) -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file - -def subst_vars(target, source, d): - """Substitute any occurrence of @foo@ by d['foo'] from source file into - target.""" - var = re.compile('@([a-zA-Z_]+)@') - with open(source, 'r') as fs: - with open(target, 'w') as ft: - for l in fs: - m = var.search(l) - if m: - ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) - else: - ft.write(l) - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " - "directory alongside your pure Python modules"), - ('verbose-cfg', None, - "change logging level from WARN to INFO which will show all " - "compiler output") - ] - - 
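`subst_vars` above rewrites `@foo@` placeholders from a dictionary, one line at a time. The core substitution is easy to exercise on plain strings (a minimal sketch of the same regex logic; note it replaces only the first placeholder on a line, as the original does):

```python
import re

_VAR = re.compile(r"@([a-zA-Z_]+)@")

def subst_line(line, d):
    # Replace the first @foo@ occurrence with d['foo'], as the deleted
    # subst_vars does for each line of a template file.
    m = _VAR.search(line)
    return line.replace("@%s@" % m.group(1), d[m.group(1)]) if m else line

assert subst_line("prefix=@prefix@", {"prefix": "/usr"}) == "prefix=/usr"
```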
boolean_options = ['force', 'inplace', 'verbose-cfg'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - self.verbose_cfg = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - self.f2py_opts = shlex.split(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = shlex.split(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - self.inplace = build_ext.inplace - if self.swig_cpp is None: - self.swig_cpp = build_ext.swig_cpp - for c in ['swig', 'swig_opt']: - o = '--'+c.replace('_', '-') - v = getattr(build_ext, c, None) - if v: - if getattr(self, c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o, v)) - setattr(self, c, v) - - def run(self): - log.info("build_src") - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - self.build_npy_pkg_config() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data, str): - new_data_files.append(data) - elif isinstance(data, tuple): - d, files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - else: - build_dir = os.path.join(self.build_src, d) - funcs = [f for f in files if hasattr(f, '__call__')] - files = [f for f in files if not hasattr(f, '__call__')] - for f in funcs: - if f.__code__.co_argcount==1: - s = f(build_dir) 
- else: - s = f() - if s is not None: - if isinstance(s, list): - files.extend(s) - elif isinstance(s, str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d, files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - - def _build_npy_pkg_config(self, info, gd): - template, install_dir, subst_dict = info - template_dir = os.path.dirname(template) - for k, v in gd.items(): - subst_dict[k] = v - - if self.inplace == 1: - generated_dir = os.path.join(template_dir, install_dir) - else: - generated_dir = os.path.join(self.build_src, template_dir, - install_dir) - generated = os.path.basename(os.path.splitext(template)[0]) - generated_path = os.path.join(generated_dir, generated) - if not os.path.exists(generated_dir): - os.makedirs(generated_dir) - - subst_vars(generated_path, template, subst_dict) - - # Where to install relatively to install prefix - full_install_dir = os.path.join(template_dir, install_dir) - return full_install_dir, generated_path - - def build_npy_pkg_config(self): - log.info('build_src: building npy-pkg config files') - - # XXX: another ugly workaround to circumvent distutils brain damage. We - # need the install prefix here, but finalizing the options of the - # install command when only building sources cause error. Instead, we - # copy the install command instance, and finalize the copy so that it - # does not disrupt how distutils want to do things when with the - # original install command instance. - install_cmd = copy.copy(get_cmd('install')) - if not install_cmd.finalized == 1: - install_cmd.finalize_options() - build_npkg = False - if self.inplace == 1: - top_prefix = '.' - build_npkg = True - elif hasattr(install_cmd, 'install_libbase'): - top_prefix = install_cmd.install_libbase - build_npkg = True - - if build_npkg: - for pkg, infos in self.distribution.installed_pkg_config.items(): - pkg_path = self.distribution.package_dir[pkg] - prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) - d = {'prefix': prefix} - for info in infos: - install_dir, generated = self._build_npy_pkg_config(info, d) - self.distribution.data_files.append((install_dir, - [generated])) - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if hasattr(source, '__call__'): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources', [])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # 
self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - log.info('building extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = self.generate_sources(sources, ext) - sources = self.template_sources(sources, ext) - sources = self.swig_sources(sources, ext) - sources = self.f2py_sources(sources, ext) - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - self.mkpath(build_dir) - - if self.verbose_cfg: - new_level = log.INFO - else: - new_level = log.WARN - old_level = log.set_threshold(new_level) - - for func in func_sources: - source = func(extension, build_dir) - if not source: - continue - if is_sequence(source): - [log.info(" adding '%s' to sources." % (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." 
% (source,)) - new_sources.append(source) - log.set_threshold(old_level) - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources, ['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources, ['.h', '.hpp', '.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir, os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - with open(target_file, 'w') as fid: - fid.write(outstr) - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." % (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - """Pyrex not supported; this remains for Cython support (see below)""" - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - """Pyrex is not supported, but some projects monkeypatch this method. - - That allows compiling Cython code, see gh-6955. - This method will remain here for compatibility reasons. - """ - return [] - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir, name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' 
\ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - for d in target_dirs: - self.mkpath(d) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name, build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options', [])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' - depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options - + ['--build-dir', target_dir, source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - target_file = os.path.join(target_dir, ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options + ['--lower', - '--build-dir', target_dir]+\ - ['-m', ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - build_dir = os.path.join(self.build_src, target_dir) - target_c = os.path.join(build_dir, 'fortranobject.c') - target_h = os.path.join(build_dir, 'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if build_dir not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." 
% (build_dir)) - extension.include_dirs.append(build_dir) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d, 'src', 'fortranobject.c') - source_h = os.path.join(d, 'src', 'fortranobject.h') - if newer(source_c, target_c) or newer(source_h, target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c, target_c) - self.copy_file(source_h, target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: - filename = os.path.join(target_dir, ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if '-c++' in extension.swig_opts: - typ = 'c++' - is_cpp = True - extension.swig_opts.remove('-c++') - elif self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - # the code below assumes that the sources list - # contains not more than one .i SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - else: - typ2 = get_swig_target(source) - if typ2 is None: - log.warn('source %r does not define swig target, assuming %s swig target' \ - % (source, typ)) - elif typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - else: - log.warn('assuming that %r has c++ swig target' % (source)) - if is_cpp: - target_ext = '.cpp' - target_file = os.path.join(target_dir, '%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' \ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' 
\ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - for d in target_dirs: - self.mkpath(d) - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] + extension.swig_opts - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match -_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search -_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search - -def get_swig_target(source): - with open(source) as f: - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - return result - -def get_swig_modulename(source): - with open(source) as f: - name = None - for line in f: - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - return name - -def _find_swig_target(target_dir, name): - for ext in ['.cpp', '.c']: - target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' - r'__user__[\w_]*)', re.I).match - -def get_f2py_modulename(source): - name = None - with open(source) as f: - for line in f: - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - return name - -########################################## diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py deleted file mode 100644 index 8bdfb7ec5823..000000000000 --- a/numpy/distutils/command/config.py +++ /dev/null @@ -1,516 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
-# Pearu Peterson -import os -import signal -import subprocess -import sys -import textwrap -import warnings - -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from distutils.ccompiler import CompileError, LinkError -import distutils -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import (check_gcc_function_attribute, - check_gcc_function_attribute_with_intrinsics, - check_gcc_variable_attribute, - check_gcc_version_at_least, - check_inline, - check_restrict, - check_compiler_gcc) - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - - if sys.platform == 'win32' and (self.compiler.compiler_type in - ('msvc', 'intelw', 'intelemw')): - # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: - # initialize call query_vcvarsall, which throws an OSError, and - # causes an error along the way without much information. We try to - # catch it here, hoping it is early enough, and print a helpful - # message instead of Error: None. - if not self.compiler.initialized: - try: - self.compiler.initialize() - except OSError as e: - msg = textwrap.dedent("""\ - Could not initialize compiler instance: do you have Visual Studio - installed? If you are trying to build with MinGW, please use "python setup.py - build -c mingw32" instead. If you have Visual Studio installed, check it is - correctly installed, and the right version (VS 2015 as of this writing). - - Original exception was: %s, and the Compiler class was %s - ============================================================================""") \ - % (e, self.compiler.__class__.__name__) - print(textwrap.dedent("""\ - ============================================================================""")) - raise distutils.errors.DistutilsPlatformError(msg) from e - - # After MSVC is initialized, add an explicit /MANIFEST to linker - # flags. See issues gh-4245 and gh-4101 for details. Also - # relevant are issues 4431 and 16296 on the Python bug tracker. 
- from distutils import msvc9compiler - if msvc9compiler.get_build_version() >= 10: - for ldflags in [self.compiler.ldflags_shared, - self.compiler.ldflags_shared_debug]: - if '/MANIFEST' not in ldflags: - ldflags.append('/MANIFEST') - - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self, mth, lang, args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77', 'f90']: - self.compiler = self.fcompiler - if self.compiler is None: - raise CompileError('%s compiler is not set' % (lang,)) - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError, CompileError) as e: - self.compiler = save_compiler - raise CompileError from e - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - src, obj = self._wrap_method(old_config._compile, lang, - (body, headers, include_dirs, lang)) - # _compile in unixcompiler.py sometimes creates .d dependency files. - # Clean them up. - self.temp_files.append(obj + '.d') - return src, obj - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77', 'f90']: - lang = 'c' # always use system linker when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - try: - d = subprocess.check_output(['cygpath', - '-w', d]) - except (OSError, subprocess.CalledProcessError): - pass - else: - d = filepath_from_subprocess_output(d) - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir, '%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - elif self.compiler.compiler_type == 'mingw32': - generate_manifest(self) - return self._wrap_method(old_config._link, lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): - self._check_compiler() - return self.try_compile( - "/* we need a dummy line to make distutils happy */", - [header], include_dirs) - - def check_decl(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #ifndef %s - (void) %s; - 
#endif - ; - return 0; - }""") % (symbol, symbol) - - return self.try_compile(body, headers, include_dirs) - - def check_macro_true(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #if %s - #else - #error false or undefined macro - #endif - ; - return 0; - }""") % (symbol,) - - return self.try_compile(body, headers, include_dirs) - - def check_type(self, type_name, headers=None, include_dirs=None, - library_dirs=None): - """Check type availability. Return True if the type can be compiled, - False otherwise""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - int main(void) { - if ((%(name)s *) 0) - return 0; - if (sizeof (%(name)s)) - return 0; - } - """) % {'name': type_name} - - st = False - try: - try: - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - st = True - except distutils.errors.CompileError: - st = False - finally: - self._clean() - - return st - - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): - """Check size of a given type.""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; - } - """) - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - self._clean() - - if expected: - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - for size in expected: - try: - self._compile(body % {'type': type_name, 'size': size}, - headers, include_dirs, 'c') - self._clean() - return size - except CompileError: - pass - - # this fails to *compile* if size > sizeof(type) - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - - # The principle is simple: we first find low and high bounds of size - # for the type, where low/high are looked up on a log scale. Then, we - # do a binary search to find the exact size between low and high - low = 0 - mid = 0 - while True: - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - break - except CompileError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - # Binary search: - while low != high: - mid = (high - low) // 2 + low - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - high = mid - except CompileError: - low = mid + 1 - return low - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. - self._check_compiler() - body = [] - if decl: - if type(decl) == str: - body.append(decl) - else: - body.append("int %s (void);" % func) - # Handle MSVC intrinsics: force MS compiler to make a function call. 
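`check_type_size` above discovers `sizeof(type)` without running any code: the probe's negative-size array only compiles when `sizeof(type) <= size` holds, so the command grows an upper bound exponentially and then bisects. The same search simulated in pure Python, with a `compiles` predicate standing in for a successful probe compilation:

```python
def find_type_size(compiles):
    # compiles(n) -> True iff the "sizeof(type) <= n" probe compiles.
    low = mid = 0
    while not compiles(mid):      # exponential search for an upper bound
        low = mid + 1
        mid = 2 * mid + 1
    high = mid
    while low != high:            # binary search between the bounds
        mid = (high - low) // 2 + low
        if compiles(mid):
            high = mid
        else:
            low = mid + 1
    return low

assert find_type_size(lambda n: 8 <= n) == 8   # e.g. a 64-bit type
```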
- # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrinsic and our 'fake' test - # declaration do not match. - body.append("#ifdef _MSC_VER") - body.append("#pragma function(%s)" % func) - body.append("#endif") - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_funcs_once(self, funcs, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - """Check a list of functions at once. - - This is useful to speed up things, since all the functions in the funcs - list will be put in one compilation unit. - - Arguments - --------- - funcs : seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - library_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionary, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f. - """ - self._check_compiler() - body = [] - if decl: - for f, v in decl.items(): - if v: - body.append("int %s (void);" % f) - - # Handle MS intrinsics. See check_func for more info. - body.append("#ifdef _MSC_VER") - for func in funcs: - body.append("#pragma function(%s)" % func) - body.append("#endif") - - body.append("int main (void) {") - if call: - for f in funcs: - if f in call and call[f]: - if not (call_args and f in call_args and call_args[f]): - args = '' - else: - args = call_args[f] - body.append(" %s(%s);" % (f, args)) - else: - body.append(" %s;" % f) - else: - for f in funcs: - body.append(" %s;" % f) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_inline(self): - """Return the inline keyword recognized by the compiler, empty string - otherwise.""" - return check_inline(self) - - def check_restrict(self): - """Return the restrict keyword recognized by the compiler, empty string - otherwise.""" - return check_restrict(self) - - def check_compiler_gcc(self): - """Return True if the C compiler is gcc""" - return check_compiler_gcc(self) - - def check_gcc_function_attribute(self, attribute, name): - return check_gcc_function_attribute(self, attribute, name) - - def check_gcc_function_attribute_with_intrinsics(self, attribute, name, - code, include): - return check_gcc_function_attribute_with_intrinsics(self, attribute, - name, code, include) - - def check_gcc_variable_attribute(self, attribute): - return check_gcc_variable_attribute(self, attribute) - - def check_gcc_version_at_least(self, major, minor=0, patchlevel=0): - """Return True if the GCC version is greater than or equal to the - specified version.""" - return check_gcc_version_at_least(self, major, minor, patchlevel) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c", use_tee=None): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. 
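`check_funcs_once` batches every function probe into a single compilation unit. A sketch of the body construction, reduced from the deleted method, showing what gets emitted for two functions when only one is actually called:

```python
def funcs_once_body(funcs, decl=None, call=None, call_args=None):
    # Build one C translation unit probing several functions at once,
    # following the structure of the deleted check_funcs_once.
    decl, call, call_args = decl or {}, call or {}, call_args or {}
    body = ["int %s (void);" % f for f, v in decl.items() if v]
    body += ["#ifdef _MSC_VER"]                 # force real calls on MSVC
    body += ["#pragma function(%s)" % f for f in funcs]
    body += ["#endif", "int main (void) {"]
    for f in funcs:
        if call.get(f):
            body.append("  %s(%s);" % (f, call_args.get(f, "")))
        else:
            body.append("  %s;" % f)
    body += ["  return 0;", "}"]
    return "\n".join(body) + "\n"

print(funcs_once_body(["sin", "cos"], decl={"sin": True}, call={"sin": True}))
```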
- """ - # 2008-11-16, RemoveMe - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" - "Usage of get_output is deprecated: please do not \n" - "use it anymore, and avoid configuration checks \n" - "involving running executable on the target machine.\n" - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning, stacklevel=2) - self._check_compiler() - exitcode, output = 255, '' - try: - grabber = GrabStdout() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - grabber.restore() - except Exception: - output = grabber.data - grabber.restore() - raise - exe = os.path.join('.', exe) - try: - # specify cwd arg for consistency with - # historic usage pattern of exec_command() - # also, note that exe appears to be a string, - # which exec_command() handled, but we now - # use a list for check_output() -- this assumes - # that exe is always a single command - output = subprocess.check_output([exe], cwd='.') - except subprocess.CalledProcessError as exc: - exitstatus = exc.returncode - output = '' - except OSError: - # preserve the EnvironmentError exit status - # used historically in exec_command() - exitstatus = 127 - output = '' - else: - output = filepath_from_subprocess_output(output) - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - self._clean() - return exitcode, output - -class GrabStdout: - - def __init__(self): - self.sys_stdout = sys.stdout - self.data = '' - sys.stdout = self - - def write (self, data): - self.sys_stdout.write(data) - self.data += data - - def flush (self): - self.sys_stdout.flush() - - def restore(self): - sys.stdout = self.sys_stdout diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py deleted file mode 100644 index ca4099886d8c..000000000000 --- a/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,126 +0,0 @@ -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=None): - # Using cache to prevent infinite recursion. - if _cache: - return - elif _cache is None: - _cache = [] - _cache.append(1) - from numpy.distutils.fcompiler import show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. 
- """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=', None, "specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=', None, "specify F77 compiler flags"), - ('f90flags=', None, "specify F90 compiler flags"), - ('opt=', None, "specify optimization flags"), - ('arch=', None, "specify architecture specific optimization flags"), - ('debug', 'g', "compile with debugging information"), - ('noopt', None, "compile without optimization"), - ('noarch', None, "compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug', 'noopt', 'noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifying config_fc, config, build_clib, build_ext, build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. - """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=', None, "specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. - return diff --git a/numpy/distutils/command/develop.py b/numpy/distutils/command/develop.py deleted file mode 100644 index af24baf2e7e1..000000000000 --- a/numpy/distutils/command/develop.py +++ /dev/null @@ -1,15 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. 
- -""" -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. - self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. - self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/numpy/distutils/command/egg_info.py b/numpy/distutils/command/egg_info.py deleted file mode 100644 index 14c62b4d1b90..000000000000 --- a/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,25 +0,0 @@ -import sys - -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - if 'sdist' in sys.argv: - import warnings - import textwrap - msg = textwrap.dedent(""" - `build_src` is being run, this may lead to missing - files in your sdist! You want to use distutils.sdist - instead of the setuptools version: - - from distutils.command.sdist import sdist - cmdclass={'sdist': sdist}" - - See numpy's setup.py or gh-7131 for details.""") - warnings.warn(msg, UserWarning, stacklevel=2) - - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. - self.run_command("build_src") - _egg_info.run(self) diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py deleted file mode 100644 index efa9b4740fc4..000000000000 --- a/numpy/distutils/command/install.py +++ /dev/null @@ -1,79 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod - have_setuptools = True -else: - import distutils.command.install as old_install_mod - have_setuptools = False -from distutils.file_util import write_file - -old_install = old_install_mod.install - -class install(old_install): - - # Always run install_clib - the command is cheap, so no need to bypass it; - # but it's not run by setuptools -- so it's run again in install_data - sub_commands = old_install.sub_commands + [ - ('install_clib', lambda x: True) - ] - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def setuptools_run(self): - """ The setuptools version of the .run() method. - - We must pull in the entire code so we can override the level used in the - _getframe() call since we wrap this call by one more level. - """ - from distutils.command.install import install as distutils_install - - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return distutils_install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. - # - caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__', '') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. 
- distutils_install.run(self) - else: - self.do_egg_install() - - def run(self): - if not have_setuptools: - r = old_install.run(self) - else: - r = self.setuptools_run() - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. - with open(self.record) as f: - lines = [] - need_rewrite = False - for l in f: - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py deleted file mode 100644 index aa2e5594c3c2..000000000000 --- a/numpy/distutils/command/install_clib.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -from distutils.core import Command -from distutils.ccompiler import new_compiler -from numpy.distutils.misc_util import get_cmd - -class install_clib(Command): - description = "Command to install installable C libraries" - - user_options = [] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', ('install_lib', 'install_dir')) - - def run (self): - build_clib_cmd = get_cmd("build_clib") - if not build_clib_cmd.build_clib: - # can happen if the user specified `--skip-build` - build_clib_cmd.finalize_options() - build_dir = build_clib_cmd.build_clib - - # We need the compiler to get the library name -> filename association - if not build_clib_cmd.compiler: - compiler = new_compiler(compiler=None) - compiler.customize(self.distribution) - else: - compiler = build_clib_cmd.compiler - - for l in self.distribution.installed_libraries: - target_dir = os.path.join(self.install_dir, l.target_dir) - name = compiler.library_filename(l.name) - source = os.path.join(build_dir, name) - self.mkpath(target_dir) - self.outfiles.append(self.copy_file(source, target_dir)[0]) - - def get_outputs(self): - return self.outfiles diff --git a/numpy/distutils/command/install_data.py b/numpy/distutils/command/install_data.py deleted file mode 100644 index 0a2e68ae192a..000000000000 --- a/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,24 +0,0 @@ -import sys -have_setuptools = ('setuptools' in sys.modules) - -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def run(self): - old_install_data.run(self) - - if have_setuptools: - # Run install_clib again, since setuptools does not run sub-commands - # of install automatically - self.run_command('install_clib') - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py deleted file mode 100644 index 91eba6f17c29..000000000000 --- a/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if 
isinstance(header, tuple): - # Kind of a hack, but I don't know where else to change this... - if header[0] == 'numpy._core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py deleted file mode 100644 index e34193883dea..000000000000 --- a/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h, str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py deleted file mode 100644 index c4a14e59901f..000000000000 --- a/numpy/distutils/core.py +++ /dev/null @@ -1,215 +0,0 @@ -import sys -from distutils.core import Distribution - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension # noqa: F401 -from numpy.distutils.numpy_distribution import NumpyDistribution -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, \ - install_clib -from numpy.distutils.misc_util import is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install_clib': install_clib.install_clib, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own versions of develop and egg_info to ensure that build_src is - # handled appropriately. 
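Command overrides compose as a plain dict merge: the package's defaults come first, and user-supplied entries win. A sketch of that merge, mirroring what the setup() wrapper later in this module does (the command-class values here are placeholder strings):

    default_cmdclass = {'sdist': 'numpy_sdist', 'build_ext': 'numpy_build_ext'}

    def effective_cmdclass(user_cmdclass=None):
        # Package defaults first, user-supplied entries override them.
        merged = dict(default_cmdclass)
        merged.update(user_cmdclass or {})
        return merged

    effective_cmdclass({'sdist': 'my_sdist'})
    # -> {'sdist': 'my_sdist', 'build_ext': 'numpy_build_ext'}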
- from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k, v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError(repr(type(dv))) - -def _command_line_ok(_cache=None): - """ Return True if command line does not contain any - help or display requests. - """ - if _cache: - return _cache[0] - elif _cache is None: - _cache = [] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs it's own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. - # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - dist = None - if always and dist is None: - dist = NumpyDistribution() - return dist - -def setup(**attr): - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help request in command in the line. 
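The _dict_append helper above is the whole merging strategy for configuration dictionaries; restated as a compact, self-contained function:

    def merge(dst, **src):
        # Recursively extend dst in place: lists and tuples concatenate,
        # dicts merge, strings append; new keys are inserted as-is.
        for key, val in src.items():
            if key not in dst:
                dst[key] = val
            elif isinstance(dst[key], tuple):
                dst[key] = dst[key] + tuple(val)
            elif isinstance(dst[key], list):
                dst[key] = dst[key] + list(val)
            elif isinstance(dst[key], dict):
                merge(dst[key], **val)
            elif isinstance(dst[key], str):
                dst[key] = dst[key] + val
            else:
                raise TypeError(repr(type(dst[key])))

    cfg = {'libraries': ['m'], 'macros': {'A': 1}}
    merge(cfg, libraries=['blas'], macros={'B': 2})
    # cfg == {'libraries': ['m', 'blas'], 'macros': {'A': 1, 'B': 2}}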
- configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config, 'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules', []): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, lib_name, build_info) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - # Use our custom NumpyDistribution class instead of distutils' one - new_attr['distclass'] = NumpyDistribution - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem[0]: - warnings.warn("[1] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, lib_name, build_info): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,), - stacklevel=2) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,), - stacklevel=2) - break - libraries.append((lib_name, build_info)) diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py deleted file mode 100644 index 77620210981d..000000000000 --- a/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/env python3 -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
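The Linux backend below builds one dictionary per processor from /proc/cpuinfo; a stripped-down, self-contained version of that parse (Linux-only, field names vary by architecture):

    def parse_cpuinfo(path='/proc/cpuinfo'):
        # One dict per processor; a key seen again starts the next entry.
        processors = [{}]
        with open(path) as fh:
            for line in fh:
                if ':' not in line:
                    continue
                name, value = (s.strip() for s in line.split(':', 1))
                if name in processors[-1]:
                    processors.append({})
                processors[-1][name] = value
        return processors

    # e.g. parse_cpuinfo()[0].get('model name') on an x86 Linux machine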
-Pearu Peterson - -""" -__all__ = ['cpu'] - -import os -import platform -import re -import sys -import types -import warnings - -from subprocess import getstatusoutput - - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = getstatusoutput(cmd) - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, "" - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase: - """Holds CPU information and provides methods for requiring - the availability of various CPU features. - """ - - def _try_call(self, func): - try: - return func() - except Exception: - pass - - def __getattr__(self, name): - if not name.startswith('_'): - if hasattr(self, '_'+name): - attr = getattr(self, '_'+name) - if isinstance(attr, types.MethodType): - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError(name) - - def _getNCPUs(self): - return 1 - - def __get_nbits(self): - abits = platform.architecture()[0] - nbits = re.compile(r'(\d+)bit').search(abits).group(1) - return nbits - - def _is_32bit(self): - return self.__get_nbits() == '32' - - def _is_64bit(self): - return self.__get_nbits() == '64' - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=2) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['family'] == '15' - - def _is_Athlon64(self): - return re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model 
name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): - return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - return re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return (self.is_Intel() - and (self.info[0]['cpu family'] == '6' - or self.info[0]['cpu family'] == '15') - and (self.has_sse3() and not self.has_ssse3()) - and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) - - def _is_Core2(self): - return (self.is_64bit() and self.is_Intel() and - re.match(r'.*?Core\(TM\)2\b', - self.info[0]['model name']) is not None) - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'], re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None - - def _has_ssse3(self): - return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None - -class 
IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0, 1)) - self.__class__.info = info - - def _not_impl(self): pass - - def _is_singleCPU(self): - return self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self, n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except Exception: pass - def __machine(self, n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class DarwinCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self, n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return self.__machine(630) - def _is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def 
_is_ppc403(self): return self.__machine(403) - def _is_ppc505(self): return self.__machine(505) - def _is_ppc801(self): return self.__machine(801) - def _is_ppc821(self): return self.__machine(821) - def _is_ppc823(self): return self.__machine(823) - def _is_ppc860(self): return self.__machine(860) - - -class SunOSCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - mach='mach', - uname_i='uname_i', - isainfo_b='isainfo -b', - isainfo_n='isainfo -n', - ) - info['uname_X'] = key_value_from_command('uname -X', sep='=') - for line in command_by_line('psrinfo -v 0'): - m = re.match(r'\s*The (?P
<p>
[\w\d]+) processor operates at', line) - if m: - info['processor'] = m.group('p') - break - self.__class__.info = info - - def _not_impl(self): pass - - def _is_i386(self): - return self.info['isainfo_n']=='i386' - def _is_sparc(self): - return self.info['isainfo_n']=='sparc' - def _is_sparcv9(self): - return self.info['isainfo_n']=='sparcv9' - - def _getNCPUs(self): - return int(self.info['uname_X'].get('NumCPU', 1)) - - def _is_sun4(self): - return self.info['arch']=='sun4' - - def _is_SUNW(self): - return re.match(r'SUNW', self.info['uname_i']) is not None - def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None - def _is_ultra1(self): - return re.match(r'.*Ultra-1', self.info['uname_i']) is not None - def _is_ultra250(self): - return re.match(r'.*Ultra-250', self.info['uname_i']) is not None - def _is_ultra2(self): - return re.match(r'.*Ultra-2', self.info['uname_i']) is not None - def _is_ultra30(self): - return re.match(r'.*Ultra-30', self.info['uname_i']) is not None - def _is_ultra4(self): - return re.match(r'.*Ultra-4', self.info['uname_i']) is not None - def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None - def _is_ultra5(self): - return re.match(r'.*Ultra-5', self.info['uname_i']) is not None - def _is_ultra60(self): - return re.match(r'.*Ultra-60', self.info['uname_i']) is not None - def _is_ultra80(self): - return re.match(r'.*Ultra-80', self.info['uname_i']) is not None - def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None - def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None - def _is_sunfire(self): - return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None - def _is_ultra(self): - return re.match(r'.*Ultra', self.info['uname_i']) is not None - - def _is_cpusparcv7(self): - return self.info['processor']=='sparcv7' - def _is_cpusparcv8(self): - return self.info['processor']=='sparcv8' - def _is_cpusparcv9(self): - return self.info['processor']=='sparcv9' - -class Win32CPUInfo(CPUInfoBase): - - info = None - pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" - # XXX: what does the value of - # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 - # mean? - - def __init__(self): - if self.info is not None: - return - info = [] - try: - #XXX: Bad style to use so long `try:...except:...`. Fix it! 
- import winreg - - prgx = re.compile(r"family\s+(?P\d+)\s+model\s+(?P\d+)" - r"\s+stepping\s+(?P\d+)", re.IGNORECASE) - chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) - pnum=0 - while True: - try: - proc=winreg.EnumKey(chnd, pnum) - except winreg.error: - break - else: - pnum+=1 - info.append({"Processor":proc}) - phnd=winreg.OpenKey(chnd, proc) - pidx=0 - while True: - try: - name, value, vtpe=winreg.EnumValue(phnd, pidx) - except winreg.error: - break - else: - pidx=pidx+1 - info[-1][name]=value - if name=="Identifier": - srch=prgx.search(value) - if srch: - info[-1]["Family"]=int(srch.group("FML")) - info[-1]["Model"]=int(srch.group("MDL")) - info[-1]["Stepping"]=int(srch.group("STP")) - except Exception as e: - print(e, '(ignoring)') - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['VendorIdentifier']=='AuthenticAMD' - - def _is_Am486(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_Am5x86(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_AMDK5(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0, 1, 2, 3] - - def _is_AMDK6(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6, 7] - - def _is_AMDK6_2(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==8 - - def _is_AMDK6_3(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==9 - - def _is_AMDK7(self): - return self.is_AMD() and self.info[0]['Family'] == 6 - - # To reliably distinguish between the different types of AMD64 chips - # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would - # require looking at the 'brand' from cpuid - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['Family'] == 15 - - # Intel - - def _is_Intel(self): - return self.info[0]['VendorIdentifier']=='GenuineIntel' - - def _is_i386(self): - return self.info[0]['Family']==3 - - def _is_i486(self): - return self.info[0]['Family']==4 - - def _is_i586(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_i686(self): - return self.is_Intel() and self.info[0]['Family']==6 - - def _is_Pentium(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_PentiumMMX(self): - return self.is_Intel() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==4 - - def _is_PentiumPro(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model']==1 - - def _is_PentiumII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3, 5, 6] - - def _is_PentiumIII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7, 8, 9, 10, 11] - - def _is_PentiumIV(self): - return self.is_Intel() and self.info[0]['Family']==15 - - def _is_PentiumM(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [9, 13, 14] - - def _is_Core2(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [15, 16, 17] - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_mmx(self): - if self.is_Intel(): - return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6, 15]) - elif self.is_AMD(): - return self.info[0]['Family'] in [5, 6, 15] - else: - return False - - 
def _has_sse(self): - if self.is_Intel(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [7, 8, 9, 10, 11]) - or self.info[0]['Family']==15) - elif self.is_AMD(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [6, 7, 8, 10]) - or self.info[0]['Family']==15) - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_Pentium4() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6, 15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. -else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -#if __name__ == "__main__": -# -# cpu.is_blaa() -# cpu.is_Intel() -# cpu.is_Alpha() -# -# print('CPU information:'), -# for name in dir(cpuinfo): -# if name[0]=='_' and name[1]!='_': -# r = getattr(cpu,name[1:])() -# if r: -# if r!=1: -# print('%s=%s' %(name[1:],r)) -# else: -# print(name[1:]), -# print() diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py deleted file mode 100644 index 2d06585a1497..000000000000 --- a/numpy/distutils/exec_command.py +++ /dev/null @@ -1,315 +0,0 @@ -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - - exec_command --- execute command in a specified directory and - in the modified environment. - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. - -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Successfully tested on: - -======== ============ ================================================= -os.name sys.platform comments -======== ============ ================================================= -posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 -posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 -posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 -posix darwin Darwin 7.2.0, Python 2.3 -nt win32 Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 -nt win32 Windows 98, Python 2.1.1. Idle 0.8 -nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. redefining environment variables may - not work. FIXED: don't use cygwin echo! - Comment: also `cmd /c echo` will not work - but redefining environment variables do work. -posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) -nt win32 Windows XP, Python 2.3.3 -======== ============ ================================================= - -Known bugs: - -* Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. 
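On modern Pythons the same (status, output) contract is a few lines of subprocess; a minimal sketch (assumes the command is given as an argv list and that a 'python' executable is on PATH):

    import subprocess

    def get_status_output(cmd, cwd=None, env=None):
        # (exit status, combined stdout+stderr as text); does not raise
        # on non-zero exit, mirroring the old (status, output) contract.
        proc = subprocess.run(cmd, cwd=cwd, env=env,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              text=True)
        return proc.returncode, proc.stdout.rstrip('\n')

    status, output = get_status_output(['python', '--version'])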
- -""" -__all__ = ['exec_command', 'find_executable'] - -import os -import sys -import subprocess -import locale -import warnings - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log - -def filepath_from_subprocess_output(output): - """ - Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. - - Inherited from `exec_command`, and possibly incorrect. - """ - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - output = output.decode(mylocale, errors='replace') - output = output.replace('\r\n', '\n') - # Another historical oddity - if output[-1:] == '\n': - output = output[:-1] - return output - - -def forward_bytes_to_stdout(val): - """ - Forward bytes from a subprocess call to the console, without attempting to - decode them. - - The assumption is that the subprocess call already returned bytes in - a suitable encoding. - """ - if hasattr(sys.stdout, 'buffer'): - # use the underlying binary output if there is one - sys.stdout.buffer.write(val) - elif hasattr(sys.stdout, 'encoding'): - # round-trip the encoding if necessary - sys.stdout.write(val.decode(sys.stdout.encoding)) - else: - # make a best-guess at the encoding - sys.stdout.write(val.decode('utf8', errors='replace')) - - -def temp_file_name(): - # 2019-01-30, 1.17 - warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' - 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) - fo, name = make_temp_file() - fo.close() - return name - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt', 'dos']: - fdir, fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW', 'PYTHON') - pythonexe = os.path.join(fdir, fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -def find_executable(exe, path=None, _cache={}): - """Return full path of a executable or None. - - Symbolic links are not followed. - """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH', os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt', 'dos', 'os2']: - fn, ext = os.path.splitext(exe) - extra_suffixes = ['.exe', '.com', '.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.info('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {name: os.environ.get(name) for name in names} - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name, value in env.items(): - os.environ[name] = value or '' - -def exec_command(command, execute_in='', use_shell=None, use_tee=None, - _with_python = 1, **env ): - """ - Return (status,output) of executed command. - - .. 
deprecated:: 1.17 - Use subprocess.Popen instead - - Parameters - ---------- - command : str - A concatenated string of executable and arguments. - execute_in : str - Before running command ``cd execute_in`` and after ``cd -``. - use_shell : {bool, None}, optional - If True, execute ``sh -c command``. Default None (True) - use_tee : {bool, None}, optional - If True use tee. Default None (True) - - - Returns - ------- - res : str - Both stdout and stderr messages. - - Notes - ----- - On NT, DOS systems the returned status is correct for external commands. - Wild cards will not work for non-posix systems or when use_shell=0. - - """ - # 2019-01-30, 1.17 - warnings.warn('exec_command is deprecated since NumPy v1.17, use ' - 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) - log.debug('exec_command(%r,%s)' % (command, - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( list(env.keys()) ) - _update_environment( **env ) - - try: - st = _exec_command(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - - -def _exec_command(command, use_shell=None, use_tee = None, **env): - """ - Internal workhorse for exec_command(). - """ - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - - if os.name == 'posix' and use_shell: - # On POSIX, subprocess always uses /bin/sh, override - sh = os.environ.get('SHELL', '/bin/sh') - if is_sequence(command): - command = [sh, '-c', ' '.join(command)] - else: - command = [sh, '-c', command] - use_shell = False - - elif os.name == 'nt' and is_sequence(command): - # On Windows, join the string for CreateProcess() ourselves as - # subprocess does it a bit differently - command = ' '.join(_quote_arg(arg) for arg in command) - - # Inherit environment by default - env = env or None - try: - # text is set to False so that communicate() - # will return bytes. We need to decode the output ourselves - # so that Python will not raise a UnicodeDecodeError when - # it encounters an invalid character; rather, we simply replace it - proc = subprocess.Popen(command, shell=use_shell, env=env, text=False, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - except OSError: - # Return 127, as os.spawn*() and /bin/sh do - return 127, '' - - text, err = proc.communicate() - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - text = text.decode(mylocale, errors='replace') - text = text.replace('\r\n', '\n') - # Another historical oddity - if text[-1:] == '\n': - text = text[:-1] - - if use_tee and text: - print(text) - return proc.returncode, text - - -def _quote_arg(arg): - """ - Quote the argument for safe use in a shell command line. 
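The standard library has vetted equivalents for both quoting conventions; for new code, a sketch (POSIX and Windows rules differ, so they are separate calls):

    import shlex
    import subprocess

    args = ['gcc', '-I', 'C:/Program Files/include', 'main.c']
    posix_line = ' '.join(shlex.quote(a) for a in args)
    windows_line = subprocess.list2cmdline(args)  # long-stable, though undocumented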
- """ - # If there is a quote in the string, assume relevant parts of the - # string are already quoted (e.g. '-I"C:\\Program Files\\..."') - if '"' not in arg and ' ' in arg: - return '"%s"' % arg - return arg - -############################################################ diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py deleted file mode 100644 index 06e6441e65df..000000000000 --- a/numpy/distutils/extension.py +++ /dev/null @@ -1,101 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts. - -Overridden to support f2py. - -""" -import re -from distutils.extension import Extension as old_Extension - - -cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match -fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match - - -class Extension(old_Extension): - """ - Parameters - ---------- - name : str - Extension name. - sources : list of str - List of source file locations relative to the top directory of - the package. - extra_compile_args : list of str - Extra command line arguments to pass to the compiler. - extra_f77_compile_args : list of str - Extra command line arguments to pass to the fortran77 compiler. - extra_f90_compile_args : list of str - Extra command line arguments to pass to the fortran90 compiler. - """ - def __init__( - self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - extra_c_compile_args=None, - extra_cxx_compile_args=None, - extra_f77_compile_args=None, - extra_f90_compile_args=None,): - - old_Extension.__init__( - self, name, [], - include_dirs=include_dirs, - define_macros=define_macros, - undef_macros=undef_macros, - library_dirs=library_dirs, - libraries=libraries, - runtime_library_dirs=runtime_library_dirs, - extra_objects=extra_objects, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - export_symbols=export_symbols) - - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - # swig_opts is assumed to be a list. Here we handle the case where it - # is specified as a string instead. 
- if isinstance(self.swig_opts, str): - import warnings - msg = "swig_opts is specified as a string instead of a list" - warnings.warn(msg, SyntaxWarning, stacklevel=2) - self.swig_opts = self.swig_opts.split() - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - self.extra_c_compile_args = extra_c_compile_args or [] - self.extra_cxx_compile_args = extra_cxx_compile_args or [] - self.extra_f77_compile_args = extra_f77_compile_args or [] - self.extra_f90_compile_args = extra_f90_compile_args or [] - - return - - def has_cxx_sources(self): - return any(cxx_ext_re(str(source)) for source in self.sources) - - def has_f2py_sources(self): - return any(fortran_pyf_ext_re(source) for source in self.sources) - -# class Extension diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 5160e2abf54f..000000000000 --- a/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,1035 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. - -""" -__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -from pathlib import Path - -from distutils.sysconfig import get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ - make_temp_file, get_shared_lib_extension -from numpy.distutils.exec_command import find_executable -from numpy.distutils import _shell_utils - -from .environment import EnvironmentConfig - -__metaclass__ = type - - -FORTRAN_COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] - - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. - - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. - All methods, except update_executables() and find_executables(), - may call the get_version() method. 
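The executable-versus-command distinction above is just a string versus an argv list; converting between the two is one call (distutils' split_quoted behaves similarly):

    import shlex

    command = shlex.split('gfortran -O2 "-I/opt/my include" -c file.f90')
    # -> ['gfortran', '-O2', '-I/opt/my include', '-c', 'file.f90']
    executable = command[0]    # 'gfortran'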
- - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration description is - # (, , , , ) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropriate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool, False), - noarch = (None, None, 'noarch', str2bool, False), - debug = (None, None, 'debug', str2bool, False), - verbose = (None, None, 'verbose', str2bool, False), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), - version_cmd = ('exe.version_cmd', None, None, None, False), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), - archiver = (None, 'AR', 'ar', None, False), - ranlib = (None, 'RANLIB', 'ranlib', None, False), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), - fix = ('flags.fix', None, None, flaglist, False), - opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), - opt_f77 = ('flags.opt_f77', None, None, flaglist, False), - opt_f90 = ('flags.opt_f90', None, None, flaglist, False), - arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), - arch_f77 = ('flags.arch_f77', None, None, flaglist, False), - arch_f90 = ('flags.arch_f90', None, None, flaglist, False), - debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), - debug_f77 = ('flags.debug_f77', None, None, flaglist, False), - debug_f90 = ('flags.debug_f90', None, None, flaglist, False), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), - ) - - language_map = {'.f': 'f77', - '.for': 'f77', - '.F': 'f77', # XXX: needs preprocessor - '.ftn': 'f77', - '.f77': 'f77', - '.f90': 'f90', - '.F90': 'f90', # XXX: needs preprocessor - '.f95': 'f90', - } - language_order = ['f90', 'f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] - executables = { - 'version_cmd': ["f77", "-v"], - 'compiler_f77': ["f77"], - 'compiler_f90': ["f90"], - 'compiler_fix': ["f90", "-fixed"], - 'linker_so': ["f90", "-shared"], - 'linker_exe': ["f90"], - 'archiver': ["ar", "-cr"], - 'ranlib': None, - } - - # If compiler does not support compiling Fortran 90 then it can - # suggest using another compiler. 
For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! - - # Switch to specify where module files are created and searched - # for USE statement. Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. - module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] - obj_extension = ".o" - - shared_lib_extension = get_shared_lib_extension() - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - # extra_{f77,f90}_compile_args are set by build_ext.build_extension method - extra_f77_compile_args = [] - extra_f90_compile_args = [] - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. - self._is_customised = False - - def __copy__(self): - obj = self.__new__(self.__class__) - obj.__dict__.update(self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. - def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. 
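The _command_property factory above generates read-only properties that re-read the shared executables dict on every access; the pattern in isolation, with hypothetical tool names:

    class Tools:
        def __init__(self):
            self.executables = {'compiler': ['gcc', '-c'], 'linker': ['gcc']}

        def _from_table(key):
            # Build a read-only property that re-reads the dict each time.
            def fget(self):
                return self.executables[key]
            return property(fget)

        compiler = _from_table('compiler')
        linker = _from_table('linker')

    t = Tools()
    t.executables['compiler'] = ['clang', '-c']
    assert t.compiler == ['clang', '-c']   # always reflects the dict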
- def set_executable(self, key, value): - self.set_command(key, value) - - def set_commands(self, **kw): - for k, v in kw.items(): - self.set_command(k, v) - - def set_command(self, key, value): - if not key in self._executable_keys: - raise ValueError( - "unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - if is_string(value): - value = split_quoted(value) - assert value is None or is_sequence_of_strings(value[1:]), (key, value) - self.executables[key] = value - - ###################################################################### - ## Methods that subclasses may redefine. But don't call these methods! - ## They are private to FCompiler class and may return unexpected - ## results if used elsewhere. So, you have been warned.. - - def find_executables(self): - """Go through the self.executables dictionary, and attempt to - find and assign appropriate executables. - - Executable names are looked for in the environment (environment - variables, the distutils.cfg, and command line), the 0th-element of - the command list, and the self.possible_executables list. - - Also, if the 0th element is "" or "", the Fortran 77 - or the Fortran 90 compiler executable is used, unless overridden - by an environment setting. - - Subclasses should call this if overridden. - """ - assert self._is_customised - exe_cache = self._exe_cache - def cached_find_executable(exe): - if exe in exe_cache: - return exe_cache[exe] - fc_exe = find_executable(exe) - exe_cache[exe] = exe_cache[fc_exe] = fc_exe - return fc_exe - def verify_command_form(name, value): - if value is not None and not is_sequence_of_strings(value): - raise ValueError( - "%s value %r is invalid in class %s" % - (name, value, self.__class__.__name__)) - def set_exe(exe_key, f77=None, f90=None): - cmd = self.executables.get(exe_key, None) - if not cmd: - return None - # Note that we get cmd[0] here if the environment doesn't - # have anything set - exe_from_environ = getattr(self.command_vars, exe_key) - if not exe_from_environ: - possibles = [f90, f77] + self.possible_executables - else: - possibles = [exe_from_environ] + self.possible_executables - - seen = set() - unique_possibles = [] - for e in possibles: - if e == '': - e = f77 - elif e == '': - e = f90 - if not e or e in seen: - continue - seen.add(e) - unique_possibles.append(e) - - for exe in unique_possibles: - fc_exe = cached_find_executable(exe) - if fc_exe: - cmd[0] = fc_exe - return fc_exe - self.set_command(exe_key, None) - return None - - ctype = self.compiler_type - f90 = set_exe('compiler_f90') - if not f90: - f77 = set_exe('compiler_f77') - if f77: - log.warn('%s: no Fortran 90 compiler found' % ctype) - else: - raise CompilerNotFound('%s: f90 nor f77' % ctype) - else: - f77 = set_exe('compiler_f77', f90=f90) - if not f77: - log.warn('%s: no Fortran 77 compiler found' % ctype) - set_exe('compiler_fix', f90=f90) - - set_exe('linker_so', f77=f77, f90=f90) - set_exe('linker_exe', f77=f77, f90=f90) - set_exe('version_cmd', f77=f77, f90=f90) - set_exe('archiver') - set_exe('ranlib') - - def update_executables(self): - """Called at the beginning of customisation. Subclasses should - override this if they need to set up the executables dictionary. - - Note that self.find_executables() is run afterwards, so the - self.executables dictionary values can contain or as - the command, which will be replaced by the found F77 or F90 - compiler. 
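The executable search described above predates shutil.which; a modern cached lookup with the same first-hit-on-PATH intent (the candidate compiler names are illustrative):

    import functools
    import shutil

    @functools.lru_cache(maxsize=None)
    def find_executable(name):
        # Absolute path of the first matching executable on PATH, or None.
        return shutil.which(name)

    for candidate in ('gfortran', 'flang', 'ifort'):
        exe = find_executable(candidate)
        if exe:
            break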
- """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. """ - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - version = CCompiler.get_version(self, force=force, ok_status=ok_status) - if version is None: - raise CompilerNotFound() - return version - - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). - """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77 = _shell_utils.NativeParser.split(f77) - f77flags = self.flag_vars.f77 - if f90: - f90 = _shell_utils.NativeParser.split(f90) - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. 
- fix = self.command_vars.compiler_fix - # NOTE: this and similar examples are probably just - # excluding --coverage flag when F90 = gfortran --coverage - # instead of putting that flag somewhere more appropriate - # this and similar examples where a Fortran compiler - # environment variable has been customized by CI or a user - # should perhaps eventually be more thoroughly tested and more - # robustly handled - if fix: - fix = _shell_utils.NativeParser.split(fix) - fixflags = self.flag_vars.fix + f90flags - - oflags, aflags, dflags = [], [], [] - # examine get_flags_<tag>_<compiler type> for extra flags - # only add them if the method is different from get_flags_<tag> - def get_flags(tag, flags): - # note that self.flag_vars.<tag> calls self.get_flags_<tag>() - flags.extend(getattr(self.flag_vars, tag)) - this_get = getattr(self, 'get_flags_' + tag) - for name, c, flagvar in [('f77', f77, f77flags), - ('f90', f90, f90flags), - ('f90', fix, fixflags)]: - t = '%s_%s' % (tag, name) - if c and this_get is not getattr(self, 'get_flags_' + t): - flagvar.extend(getattr(self.flag_vars, t)) - if not noopt: - get_flags('opt', oflags) - if not noarch: - get_flags('arch', aflags) - if debug: - get_flags('debug', dflags) - - fflags = self.flag_vars.flags + dflags + oflags + aflags - - if f77: - self.set_commands(compiler_f77=f77+f77flags+fflags) - if f90: - self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags) - if fix: - self.set_commands(compiler_fix=fix+fixflags+fflags) - - - #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS - linker_so = self.linker_so - if linker_so: - linker_so_flags = self.flag_vars.linker_so - if sys.platform.startswith('aix'): - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - if sys.platform.startswith('os400'): - from distutils.sysconfig import get_config_var - python_config = get_config_var('LIBPL') - ld_so_aix = os.path.join(python_config, 'ld_so_aix') - python_exp = os.path.join(python_config, 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - self.set_commands(linker_so=linker_so+linker_so_flags) - - linker_exe = self.linker_exe - if linker_exe: - linker_exe_flags = self.flag_vars.linker_exe - self.set_commands(linker_exe=linker_exe+linker_exe_flags) - - ar = self.command_vars.archiver - if ar: - arflags = self.flag_vars.ar - self.set_commands(archiver=[ar]+arflags) - - self.set_library_dirs(self.get_library_dirs()) - self.set_libraries(self.get_libraries()) - - def dump_properties(self): - """Print out the attributes of a compiler instance.""" - props = [] - for key in list(self.executables.keys()) + \ - ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch']: - if hasattr(self, key): - v = getattr(self, key) - props.append((key, None, '= '+repr(v))) - props.sort() - - pretty_printer = FancyGetopt(props) - for l in pretty_printer.generate_help("%s instance properties:" \ - % (self.__class__.__name__)): - if l[:4]==' --': - l = ' ' + l[4:] - print(l) - - ################### - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - src_flags = {} - if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ - and not has_f90_header(src): - flavor = ':f77' - compiler = self.compiler_f77 - src_flags = get_f77flags(src) - extra_compile_args = self.extra_f77_compile_args or [] - elif is_free_format(src): -
flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(), obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - if extra_compile_args: - log.info('extra %s options: %r' \ - % (flavor[1:], ' '.join(extra_compile_args))) - - extra_flags = src_flags.get(self.compiler_type, []) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs + extra_compile_args - - display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, - src) - try: - self.spawn(command, display=display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(), module_build_dir]) - else: - options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ', self.__class__.__name__) - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ', self.__class__.__name__) - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError("'output_dir' must be a string or None") - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(), output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - command = linker + ld_args - try: - self.spawn(command) - 
except DistutilsExecError as e: - msg = str(e) - raise LinkError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) - return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - def can_ccompiler_link(self, ccompiler): - """ - Check if the given C compiler can link objects produced by - this compiler. - """ - return True - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - - Parameters - ---------- - objects : list - List of object files to include. - output_dir : str - Output directory to place generated object files. - extra_dll_dir : str - Output directory to place extra DLL files that need to be - included on Windows. - - Returns - ------- - converted_objects : list of str - List of converted object files. - Note that the number of output files is not necessarily - the same as inputs. - - """ - raise NotImplementedError() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', - 'intelvem', 'intelem', 'flang')), - ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), - ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', - 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', - 'pathf95', 'nagfor', 'fujitsu')), - ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu', - 'g95', 'pg')), - ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), - ('irix.*', ('mips', 'gnu', 'gnu95',)), - ('aix.*', ('ibm', 'gnu', 'gnu95',)), - # os.name mappings - ('posix', ('gnu', 'gnu95',)), - ('nt', ('gnu', 'gnu95',)), - ('mac', ('gnu95', 'gnu', 'pg')), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' 
+ module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' % (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' % (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - log.info("get_default_fcompiler: matching types: '%s'", - matching_compiler_types) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -# Flag to avoid rechecking for Fortran compiler every time -failed_fcompilers = set() - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. 
- """ - global failed_fcompilers - fcompiler_key = (plat, compiler) - if fcompiler_key in failed_fcompilers: - return None - - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - failed_fcompilers.add(fcompiler_key) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). - """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound) as e: - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print("For compiler details, run 'config_fc --verbose' setup command.") - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - - -_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search -_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search -_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. 
- result = 0 - with open(file, encoding='latin1') as f: - line = f.readline() - n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line) or _has_fix_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - line = line.rstrip() - if line and line[0]!='!': - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': - result = 1 - break - line = f.readline() - return result - -def has_f90_header(src): - with open(src, encoding='latin1') as f: - line = f.readline() - return _has_f90_header(line) or _has_fix_header(line) - -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I) -def get_f77flags(src): - """ - Search the first 20 lines of fortran 77 code for line pattern - `CF77FLAGS(<fcompiler type>)=<f77 flags>` - Return a dictionary {<fcompiler type>:<f77 flags>}. - """ - flags = {} - with open(src, encoding='latin1') as f: - i = 0 - for line in f: - i += 1 - if i>20: break - m = _f77flags_re.match(line) - if not m: continue - fcname = m.group('fcname').strip() - fflags = m.group('fflags').strip() - flags[fcname] = split_quoted(fflags) - return flags - -# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags - -if __name__ == '__main__': - show_fcompilers() diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py deleted file mode 100644 index e013def5d1a4..000000000000 --- a/numpy/distutils/fcompiler/absoft.py +++ /dev/null @@ -1,158 +0,0 @@ - -# Absoft Corporation ceased operations on 12/31/2022. -# Thus, all links to <http://www.absoft.com> are invalid. - -# Notes: -# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py -# generated extension modules (works for f2py v2.45.241_1936 and up) -import os - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from numpy.distutils.misc_util import cyg2win32 - -compilers = ['AbsoftFCompiler'] - -class AbsoftFCompiler(FCompiler): - - compiler_type = 'absoft' - description = 'Absoft Corp Fortran Compiler' - #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp' - version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler'\ - r'|Absoft Fortran Compiler Version'\ - r'|Copyright Absoft Corporation.*?Version))'\ - r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)' - - # on windows: f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 - - # samt5735(8)$ f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 - # Note that fink installs g77 as f77, so need to use f90 for detection. - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : ["f77"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["<F90>"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - if os.name=='nt': - library_switch = '/out:' #No space after /out:! - - module_dir_switch = None - module_include_switch = '-p' - - def update_executables(self): - f = cyg2win32(dummy_fortran_file()) - self.executables['version_cmd'] = ['<F90>', '-V', '-c', - f+'.f', '-o', f+'.o'] - - def get_flags_linker_so(self): - if os.name=='nt': - opt = ['/dll'] - # The "-K shared" switches are being left in for pre-9.0 versions - # of Absoft though I don't think versions earlier than 9 can - # actually be used to build shared libraries. In fact, version - # 8 of Absoft doesn't recognize "-K shared" and will fail.
- elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K", "shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link', '/PATH:%s' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '11.0': - opt.extend(['af90math', 'afio', 'af77math', 'amisc']) - elif self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math', 'fio', 'f77math', 'U77']) - else: - opt.extend(['fio', 'f90math', 'fmath', 'U77']) - if os.name =='nt': - opt.append('COMDLG32') - return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22', '-N90', '-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f', '-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - opt.extend(["-f", "fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/numpy/distutils/fcompiler/arm.py b/numpy/distutils/fcompiler/arm.py deleted file mode 100644 index 3eb7e9af9c8c..000000000000 --- a/numpy/distutils/fcompiler/arm.py +++ /dev/null @@ -1,71 +0,0 @@ -import sys - -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['ArmFlangCompiler'] - -import functools - -class ArmFlangCompiler(FCompiler): - compiler_type = 'arm' - description = 'Arm Compiler' - version_pattern = r'\s*Arm.*version (?P<version>[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['armflang'] - - executables = { - 'version_cmd': ["<F90>", "--version"], - 'compiler_f77': ["armflang", "-fPIC"], - 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], - 'compiler_f90': ["armflang", "-fPIC"], - 'linker_so': ["armflang", "-fPIC", "-shared"], - 'archiver': ["ar", "-cr"], - 'ranlib': None - } - - pic_flags = ["-fPIC", "-DPIC"] - c_compiler = 'arm' - module_dir_switch = '-module ' # Don't remove ending
space! - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - return '-Wl,-rpath=%s' % dir - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='armflang').get_version()) - diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index 01314c136acf..000000000000 --- a/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,120 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] -if os.name != 'posix' or sys.platform[:6] == 'cygwin' : - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran Compiler' - version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*' - - if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['<F90>', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : ['<F90>'], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore', '-nomixed_str_len_arg'] - def get_flags_debug(self): - return ['-g', '-check bounds'] - def get_flags_opt(self): - return ['-O4', '-align dcommons', '-assume bigarrays', - '-assume nozsize', '-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared', '-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' - r' Version (?P<version>[^\s]*).*') - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:!
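The '/OUT:' comment above, and the matching note on object_switch earlier in this diff, encode one convention: a switch ending in a space becomes its own argv element, while one without a trailing space is concatenated onto the file name. A small sketch of how the deleted _compile/link logic interprets it; output_args() is a made-up name for illustration:

    def output_args(switch, filename):
        # '-o ' (trailing space) -> ['-o', 'out.o']    (two argv entries)
        # '/OUT:' (no space)     -> ['/OUT:out.lib']   (one fused entry)
        if switch.endswith(' '):
            return [switch.strip(), filename]
        return [switch.strip() + filename]

    print(output_args('-o ', 'dummy.o'))      # ['-o', 'dummy.o']
    print(output_args('/OUT:', 'dummy.lib'))  # ['/OUT:dummy.lib']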
- - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from numpy.distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError: - pass - except AttributeError as e: - if '_MSVCCompiler__root' in str(e): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) - else: - raise - except OSError as e: - if not "vcvarsall.bat" in str(e): - print("Unexpected OSError in", __file__) - raise - except ValueError as e: - if not "'path'" in str(e): - print("Unexpected ValueError in", __file__) - raise - - executables = { - 'version_cmd' : ['<F90>', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : ['<F90>'], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase', '/assume:underscore'] - def get_flags_opt(self): - return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] - def get_flags_arch(self): - return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py deleted file mode 100644 index ecd4d9989279..000000000000 --- a/numpy/distutils/fcompiler/environment.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -from distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig: - def __init__(self, distutils_section='ALL', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = self._conf_keys[name] - hook, envvar, confvar, convert, append = conf_desc - if not convert: - convert = lambda x : x - print('%s.%s:' % (self._distutils_section, name)) - v = self._hook_handler(name, hook) - print(' hook : %s' % (convert(v),)) - if envvar: - v = os.environ.get(envvar, None) - print(' environ: %s' % (convert(v),)) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print(' config : %s' % (convert(v),)) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError( - f"'EnvironmentConfig' object has no attribute '{name}'" - ) from None - - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert, append = conf_desc - if convert is None: - convert = lambda x: x - var = self._hook_handler(name, hook) - if envvar is not None: - envvar_contents = os.environ.get(envvar) - if envvar_contents is not None: - envvar_contents = convert(envvar_contents) - if var and append: - if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': - var.extend(envvar_contents) - else: - # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 - # to
keep old (overwrite flags rather than append to - # them) behavior - var = envvar_contents - else: - var = envvar_contents - if confvar is not None and self._conf: - if confvar in self._conf: - source, confvar_contents = self._conf[confvar] - var = convert(confvar_contents) - return var - - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/numpy/distutils/fcompiler/fujitsu.py b/numpy/distutils/fcompiler/fujitsu.py deleted file mode 100644 index ddce67456d18..000000000000 --- a/numpy/distutils/fcompiler/fujitsu.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -fujitsu - -Supports Fujitsu compiler function. -This compiler is developed by Fujitsu and is used in A64FX on Fugaku. -""" -from numpy.distutils.fcompiler import FCompiler - -compilers = ['FujitsuFCompiler'] - -class FujitsuFCompiler(FCompiler): - compiler_type = 'fujitsu' - description = 'Fujitsu Fortran Compiler' - - possible_executables = ['frt'] - version_pattern = r'frt \(FRT\) (?P<version>[a-z\d.]+)' - # $ frt --version - # frt (FRT) x.x.x yyyymmdd - - executables = { - 'version_cmd' : ["<F90>", "--version"], - 'compiler_f77' : ["frt", "-Fixed"], - 'compiler_fix' : ["frt", "-Fixed"], - 'compiler_f90' : ["frt"], - 'linker_so' : ["frt", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-KPIC'] - module_dir_switch = '-M' - module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - def runtime_library_dir_option(self, dir): - return f'-Wl,-rpath={dir}' - def get_libraries(self): - return ['fj90f', 'fj90i', 'fjsrcinfo'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('fujitsu').get_version()) diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index e109a972a872..000000000000 --- a/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,42 +0,0 @@ -# http://g95.sourceforge.net/ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!)
Aug 22 2006) - - executables = { - 'version_cmd' : ["<F90>", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["<F90>", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('g95').get_version()) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index 474ee35945b2..000000000000 --- a/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,555 +0,0 @@ -import re -import os -import sys -import warnings -import platform -import tempfile -import hashlib -import base64 -import subprocess -from subprocess import Popen, PIPE, STDOUT -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.fcompiler import FCompiler -from distutils.version import LooseVersion - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") - -# XXX: handle cross compilation - - -def is_win64(): - return sys.platform == "win32" and platform.architecture()[0] == "64bit" - - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77', ) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - """Handle the different versions of GNU fortran compilers""" - # Strip warning(s) that may be emitted by gfortran - while version_string.startswith('gfortran: warning'): - version_string =\ - version_string[version_string.find('\n') + 1:].strip() - - # Gfortran versions from after 2010 will output a simple string - # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older - # gfortrans may still return long version strings (``-dumpversion`` was - # an alias for ``--version``) - if len(version_string) <= 20: - # Try to find a valid version string - m = re.search(r'([0-9.]+)', version_string) - if m: - # g77 provides a longer version string that starts with GNU - # Fortran - if version_string.startswith('GNU Fortran'): - return ('g77', m.group(1)) - - # gfortran only outputs a version string such as #.#.#, so check - # if the match is at the start of the string - elif m.start() == 0: - return ('gfortran', m.group(1)) - else: - # Output probably from --version, try harder: - m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.search( - r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith(('0', '2', '3')): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - # If still nothing, raise an error to make the problem easy to find.
- err = 'A valid Fortran version was not found in this string:\n' - raise ValueError(err + version_string) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "-dumpversion"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - suggested_f90_compiler = 'gnu95' - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform == 'darwin': - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value - # and leave it alone. But, distutils will complain if the - # environment's value is different from the one in the Python - # Makefile used to build Python. We let distutils handle this - # error checking. - if not target: - # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, - # we try to get it first from sysconfig and then - # fall back to setting it to 10.9 This is a reasonable default - # even when using the official Python dist and those derived - # from it. - import sysconfig - target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') - if not target: - target = '10.9' - s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}' - warnings.warn(s, stacklevel=2) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." 
- opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - try: - output = subprocess.check_output(self.compiler_f77 + - ['-print-libgcc-file-name']) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - return os.path.dirname(output) - return None - - def get_libgfortran_dir(self): - if sys.platform[:5] == 'linux': - libgfortran_name = 'libgfortran.so' - elif sys.platform == 'darwin': - libgfortran_name = 'libgfortran.dylib' - else: - libgfortran_name = None - - libgfortran_dir = None - if libgfortran_name: - find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] - try: - output = subprocess.check_output( - self.compiler_f77 + find_lib_arg) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - libgfortran_dir = os.path.dirname(output) - return libgfortran_dir - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - path = os.path.join(d, "lib%s.a" % self.g2c) - if not os.path.exists(path): - root = os.path.join(d, *((os.pardir, ) * 4)) - d2 = os.path.abspath(os.path.join(root, 'lib')) - path = os.path.join(d2, "lib%s.a" % self.g2c) - if os.path.exists(path): - opt.append(d2) - opt.append(d) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d, f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type == 'msvc': - opt.append('gcc') - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - v = self.get_version() - if v and v <= '3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. 
- opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def _c_arch_flags(self): - """ Return detected arch flags from CFLAGS """ - import sysconfig - try: - cflags = sysconfig.get_config_vars()['CFLAGS'] - except KeyError: - return [] - arch_re = re.compile(r"-arch\s+(\w+)") - arch_flags = [] - for arch in arch_re.findall(cflags): - arch_flags += ['-arch', arch] - return arch_flags - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - if sys.platform == 'win32' or sys.platform == 'cygwin': - # Linux/Solaris/Unix support RPATH, Windows does not - raise NotImplementedError - - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - if sys.platform == 'darwin': - return f'-Wl,-rpath,{dir}' - elif sys.platform.startswith(('aix', 'os400')): - # AIX RPATH is called LIBPATH - return f'-Wl,-blibpath:{dir}' - else: - return f'-Wl,-rpath={dir}' - - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran', ) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - v = v[1] - if LooseVersion(v) >= "4": - # gcc-4 series releases do not support -mno-cygwin option - pass - else: - # use -mno-cygwin flag for gfortran when Python is not - # Cygwin-Python - if sys.platform == 'win32': - for key in [ - 'version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe' - ]: - self.executables[key].append('-mno-cygwin') - return v - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["<F90>", "-dumpversion"], - 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", - "-fno-second-underscore"], - 'compiler_f90' : [None, "-Wall", "-g", - "-fno-second-underscore"], - 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", - "-fno-second-underscore"], - 'linker_so' : ["<F90>", "-Wall", "-g"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-Wall"] - } - - module_dir_switch = '-J' - module_include_switch = '-I' - - if sys.platform.startswith(('aix', 'os400')): - executables['linker_so'].append('-lpthread') - if platform.architecture()[0][:2] == '64': - for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: - executables[key].append('-maix64') - - g2c = 'gfortran' - - def _universal_flags(self, cmd): - """Return a list of -arch flags for every supported architecture.""" - if not sys.platform == 'darwin': - return [] - arch_flags = [] - # get arches the C compiler gets.
- c_archs = self._c_arch_flags() - if "i386" in c_archs: - c_archs[c_archs.index("i386")] = "i686" - # check the arches the Fortran compiler supports, and compare with - # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: - if _can_target(cmd, arch) and arch in c_archs: - arch_flags.extend(["-arch", arch]) - return arch_flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - arch_flags = self._universal_flags(self.compiler_f90) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - arch_flags = self._universal_flags(self.linker_so) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_library_dirs(self): - opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: - d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, *((os.pardir, ) * 4)) - path = os.path.join(root, "lib") - mingwdir = os.path.normpath(path) - if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): - opt.append(mingwdir) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i + 1, "mingwex") - opt.insert(i + 1, "mingw32") - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - pass - return opt - - def get_target(self): - try: - p = subprocess.Popen( - self.compiler_f77 + ['-v'], - stdin=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - stdout, stderr = p.communicate() - output = (stdout or b"") + (stderr or b"") - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - m = TARGET_R.search(output) - if m: - return m.group(1) - return "" - - def _hash_files(self, filenames): - h = hashlib.sha1() - for fn in filenames: - with open(fn, 'rb') as f: - while True: - block = f.read(131072) - if not block: - break - h.update(block) - text = base64.b32encode(h.digest()) - text = text.decode('ascii') - return text.rstrip('=') - - def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, - chained_dlls, is_archive): - """Create a wrapper shared library for the given objects - - Return an MSVC-compatible lib - """ - - c_compiler = self.c_compiler - if c_compiler.compiler_type != "msvc": - raise ValueError("This method only supports MSVC") - - object_hash = self._hash_files(list(objects) + list(chained_dlls)) - - if is_win64(): - tag = 'win_amd64' - else: - tag = 'win32' - - basename = 'lib' + os.path.splitext( - os.path.basename(objects[0]))[0][:8] - root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag - dll_name = root_name + '.dll' - def_name = root_name + '.def' - lib_name = root_name + '.lib' - dll_path = os.path.join(extra_dll_dir, dll_name) - def_path = os.path.join(output_dir, def_name) - lib_path = os.path.join(output_dir, lib_name) - - if os.path.isfile(lib_path): - # Nothing to do - return lib_path, dll_path - - if is_archive: - objects = (["-Wl,--whole-archive"] + list(objects) + - ["-Wl,--no-whole-archive"]) - self.link_shared_object( - objects, - dll_name, - output_dir=extra_dll_dir, - extra_postargs=list(chained_dlls) + [ - '-Wl,--allow-multiple-definition', - '-Wl,--output-def,' + def_path, - '-Wl,--export-all-symbols', - '-Wl,--enable-auto-import', - '-static', - '-mlong-double-64', - ]) - - # No PowerPC! - if is_win64(): - specifier = '/MACHINE:X64' - else: - specifier = '/MACHINE:X86' - - # MSVC specific code - lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] - if not c_compiler.initialized: - c_compiler.initialize() - c_compiler.spawn([c_compiler.lib] + lib_args) - - return lib_path, dll_path - - def can_ccompiler_link(self, compiler): - # MSVC cannot link objects compiled by GNU fortran - return compiler.compiler_type not in ("msvc", ) - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - """ - if self.c_compiler.compiler_type == "msvc": - # Compile a DLL and return the lib for the DLL as - # the object. Also keep track of previous DLLs that - # we have compiled so that we can link against them. - - # If there are .a archives, assume they are self-contained - # static libraries, and build separate DLLs for each - archives = [] - plain_objects = [] - for obj in objects: - if obj.lower().endswith('.a'): - archives.append(obj) - else: - plain_objects.append(obj) - - chained_libs = [] - chained_dlls = [] - for archive in archives[::-1]: - lib, dll = self._link_wrapper_lib( - [archive], - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=True) - chained_libs.insert(0, lib) - chained_dlls.insert(0, dll) - - if not plain_objects: - return chained_libs - - lib, dll = self._link_wrapper_lib( - plain_objects, - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=False) - return [lib] + chained_libs - else: - raise ValueError("Unsupported C compiler") - - -def _can_target(cmd, arch): - """Return true if the architecture supports the -arch flag""" - newcmd = cmd[:] - fid, filename = tempfile.mkstemp(suffix=".f") - os.close(fid) - try: - d = os.path.dirname(filename) - output = os.path.splitext(filename)[0] + ".o" - try: - newcmd.extend(["-arch", arch, "-c", filename]) - p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) - p.communicate() - return p.returncode == 0 - finally: - if os.path.exists(output): - os.remove(output) - finally: - os.remove(filename) - - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - - print(customized_fcompiler('gnu').get_version()) - try: - print(customized_fcompiler('g95').get_version()) - except Exception as e: - print(e) diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 09e6483bf5ad..000000000000 --- a/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,41 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class 
HPUXFCompiler(FCompiler): - - compiler_type = 'hpux' - description = 'HP Fortran 90 Compiler' - version_pattern = r'HP F90 (?P<version>[^\s*,]*)' - - executables = { - 'version_cmd' : ["f90", "+version"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["ld", "-b"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['+Z'] - def get_flags(self): - return self.pic_flags + ['+ppu', '+DD64'] - def get_flags_opt(self): - return ['-O3'] - def get_libraries(self): - return ['m'] - def get_library_dirs(self): - opt = ['/usr/lib/hpux64'] - return opt - def get_version(self, force=0, ok_status=[256, 0, 1]): - # XXX status==256 may indicate 'unrecognized option' or - # 'no input file'. So, version_cmd needs more work. - return FCompiler.get_version(self, force, ok_status) - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(10) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='hpux').get_version()) diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py deleted file mode 100644 index 29927518c703..000000000000 --- a/numpy/distutils/fcompiler/ibm.py +++ /dev/null @@ -1,97 +0,0 @@ -import os -import re -import sys -import subprocess - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.misc_util import make_temp_file -from distutils import log - -compilers = ['IBMFCompiler'] - -class IBMFCompiler(FCompiler): - compiler_type = 'ibm' - description = 'IBM XL Fortran Compiler' - version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)' - #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 - - executables = { - 'version_cmd' : ["<F77>", "-qversion"], - 'compiler_f77' : ["xlf"], - 'compiler_fix' : ["xlf90", "-qfixed"], - 'compiler_f90' : ["xlf90"], - 'linker_so' : ["xlf95"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_version(self,*args,**kwds): - version = FCompiler.get_version(self,*args,**kwds) - - if version is None and sys.platform.startswith('aix'): - # use lslpp to find out xlf version - lslpp = find_executable('lslpp') - xlf = find_executable('xlf') - if os.path.exists(xlf) and os.path.exists(lslpp): - try: - o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) - except (OSError, subprocess.CalledProcessError): - pass - else: - m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o) - if m: version = m.group('version') - - xlf_dir = '/etc/opt/ibmcmp/xlf' - if version is None and os.path.isdir(xlf_dir): - # linux: - # If the output of xlf does not contain version info - # (that's the case with xlf 8.1, for instance) then - # let's try another method: - l = sorted(os.listdir(xlf_dir)) - l.reverse() - l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] - if l: - from distutils.version import LooseVersion - self.version = version = LooseVersion(l[0]) - return version - - def get_flags(self): - return ['-qextname'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - opt = [] - if sys.platform=='darwin': - opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') - else: - opt.append('-bshared') - version = self.get_version(ok_status=[0, 40]) - if version is not None: - if sys.platform.startswith('aix'): - xlf_cfg = '/etc/xlf.cfg' - else: -
xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version - fo, new_cfg = make_temp_file(suffix='_xlf.cfg') - log.info('Creating '+new_cfg) - with open(xlf_cfg) as fi: - crt1_match = re.compile(r'\s*crt\s*=\s*(?P.*)/crt1.o').match - for line in fi: - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fo.close() - opt.append('-F'+new_cfg) - return opt - - def get_flags_opt(self): - return ['-O3'] - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py deleted file mode 100644 index 1d6065904110..000000000000 --- a/numpy/distutils/fcompiler/intel.py +++ /dev/null @@ -1,211 +0,0 @@ -# http://developer.intel.com/software/products/compilers/flin/ -import sys - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file - -compilers = ['IntelFCompiler', 'IntelVisualFCompiler', - 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', - 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] - - -def intel_version_match(type): - # Match against the important stuff in the version string - return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) - - -class BaseIntelFCompiler(FCompiler): - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '-FI', '-V', '-c', - f + '.f', '-o', f + '.o'] - - def runtime_library_dir_option(self, dir): - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - return '-Wl,-rpath=%s' % dir - - -class IntelFCompiler(BaseIntelFCompiler): - - compiler_type = 'intel' - compiler_aliases = ('ifort',) - description = 'Intel Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - possible_executables = ['ifort', 'ifc'] - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : [None, "-72", "-w90", "-w95"], - 'compiler_f90' : [None], - 'compiler_fix' : [None, "-FI"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
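Every one of these deleted `FCompiler` subclasses follows the same version-detection convention: it declares a `version_pattern` regex containing a named group `version`, and `FCompiler.get_version()` applies that pattern to the output of `version_cmd`. A minimal standalone sketch of the mechanism, using an illustrative banner string:

```python
import re

# Illustrative only: pattern and banner in the style of the deleted
# IBMFCompiler; get_version() returns the named `version` group.
version_pattern = r'IBM XL Fortran for AIX, V(?P<version>[^\s*]*)'

banner = 'IBM XL Fortran for AIX, V15.1.3'
m = re.search(version_pattern, banner)
print(m.group('version') if m else None)  # -> 15.1.3
```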
- module_include_switch = '-I' - - def get_flags_free(self): - return ['-FR'] - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): # Scipy test failures with -O2 - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model', 'strict', '-O1', - '-assume', 'minus0', '-{}'.format(mpopt)] - - def get_flags_arch(self): - return [] - - def get_flags_linker_so(self): - opt = FCompiler.get_flags_linker_so(self) - v = self.get_version() - if v and v >= '8.0': - opt.append('-nofor_main') - if sys.platform == 'darwin': - # Here, it's -dynamiclib - try: - idx = opt.index('-shared') - opt.remove('-shared') - except ValueError: - idx = 0 - opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] - return opt - - -class IntelItaniumFCompiler(IntelFCompiler): - compiler_type = 'intele' - compiler_aliases = () - description = 'Intel Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium|IA-64') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - -class IntelEM64TFCompiler(IntelFCompiler): - compiler_type = 'intelem' - compiler_aliases = () - description = 'Intel Fortran Compiler for 64-bit apps' - - version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - -# Is there no difference in the version string between the above compilers -# and the Visual compilers? - - -class IntelVisualFCompiler(BaseIntelFCompiler): - compiler_type = 'intelv' - description = 'Intel Visual Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '/FI', '/c', - f + '.f', '/o', f + '.o'] - - ar_exe = 'lib.exe' - possible_executables = ['ifort', 'ifl'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None], - 'compiler_fix' : [None], - 'compiler_f90' : [None], - 'linker_so' : [None], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - compile_switch = '/c ' - object_switch = '/Fo' # No space after /Fo! - library_switch = '/OUT:' # No space after /OUT:! 
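`IntelFCompiler.get_flags_opt` above chooses between `-openmp` and `-qopenmp` with a plain string comparison (`v < '15'`), and `get_flags_linker_so` gates `-nofor_main` on `v >= '8.0'`. String comparison is lexicographic, which happens to be safe for the versions those branches target but misorders others; a short sketch of the difference:

```python
# Lexicographic vs. numeric version comparison.
def as_tuple(v):
    return tuple(int(part) for part in v.split('.'))

print('9.1' < '15')                      # False: '9' > '1' lexicographically
print(as_tuple('9.1') < as_tuple('15'))  # True: (9, 1) < (15,) numerically
```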
- module_dir_switch = '/module:' # No space after /module: - module_include_switch = '/I' - - def get_flags(self): - opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', - '/assume:underscore', '/fpp'] - return opt - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['/4Yb', '/d2'] - - def get_flags_opt(self): - return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 - - def get_flags_arch(self): - return ["/arch:IA32", "/QaxSSE3"] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -class IntelItaniumVisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelev' - description = 'Intel Visual Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - - possible_executables = ['efl'] # XXX this is a wild guess - ar_exe = IntelVisualFCompiler.ar_exe - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI", "-4L72", "-w"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - -class IntelEM64VisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelvem' - description = 'Intel Visual Fortran Compiler for 64-bit apps' - - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - - def get_flags_arch(self): - return [] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='intel').get_version()) diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py deleted file mode 100644 index e925838268b8..000000000000 --- a/numpy/distutils/fcompiler/lahey.py +++ /dev/null @@ -1,45 +0,0 @@ -import os - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['LaheyFCompiler'] - -class LaheyFCompiler(FCompiler): - - compiler_type = 'lahey' - description = 'Lahey/Fujitsu Fortran 95 Compiler' - version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["lf95", "--fix"], - 'compiler_fix' : ["lf95", "--fix"], - 'compiler_f90' : ["lf95"], - 'linker_so' : ["lf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g', '--chk', '--chkglobal'] - def get_library_dirs(self): - opt = [] - d = os.environ.get('LAHEY') - if d: - opt.append(os.path.join(d, 'lib')) - return opt - def get_libraries(self): - opt = [] - opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/numpy/distutils/fcompiler/mips.py b/numpy/distutils/fcompiler/mips.py deleted file mode 100644 index a0973804571b..000000000000 --- a/numpy/distutils/fcompiler/mips.py +++ /dev/null @@ -1,54 +0,0 @@ -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler - -compilers = ['MIPSFCompiler'] - -class MIPSFCompiler(FCompiler): - - compiler_type = 'mips' - description = 'MIPSpro Fortran Compiler' - version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["", "-version"], - 'compiler_f77' : 
["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["f90", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu, 'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='mips').get_version()) diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index 939201f44e02..000000000000 --- a/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,87 +0,0 @@ -import sys -import re -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler', 'NAGFORCompiler'] - -class BaseNAGFCompiler(FCompiler): - version_pattern = r'NAG.* Release (?P[^(\s]*)' - - def version_match(self, version_string): - m = re.search(self.version_pattern, version_string) - if m: - return m.group('version') - else: - return None - - def get_flags_linker_so(self): - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - return [] - -class NAGFCompiler(BaseNAGFCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_arch(self): - version = self.get_version() - if version and version < '5.1': - return ['-target=native'] - else: - return BaseNAGFCompiler.get_flags_arch(self) - def get_flags_debug(self): - return ['-g', '-gline', '-g90', '-nan', '-C'] - -class NAGFORCompiler(BaseNAGFCompiler): - - compiler_type = 'nagfor' - description = 'NAG Fortran Compiler' - - executables = { - 'version_cmd' : ["nagfor", "-V"], - 'compiler_f77' : ["nagfor", "-fixed"], - 'compiler_fix' : ["nagfor", "-fixed"], - 'compiler_f90' : ["nagfor"], - 'linker_so' : ["nagfor"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedrts', - '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_debug(self): - version = self.get_version() - if version and version > '6.1': - return ['-g', '-u', '-nan', '-C=all', '-thread_safe', - '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] - else: - return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils 
import customized_fcompiler - compiler = customized_fcompiler(compiler='nagfor') - print(compiler.get_version()) - print(compiler.get_flags_debug()) diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py deleted file mode 100644 index ef411fffc7cb..000000000000 --- a/numpy/distutils/fcompiler/none.py +++ /dev/null @@ -1,28 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils import customized_fcompiler - -compilers = ['NoneFCompiler'] - -class NoneFCompiler(FCompiler): - - compiler_type = 'none' - description = 'Fake Fortran compiler' - - executables = {'compiler_f77': None, - 'compiler_f90': None, - 'compiler_fix': None, - 'linker_so': None, - 'linker_exe': None, - 'archiver': None, - 'ranlib': None, - 'version_cmd': None, - } - - def find_executables(self): - pass - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - print(customized_fcompiler(compiler='none').get_version()) diff --git a/numpy/distutils/fcompiler/nv.py b/numpy/distutils/fcompiler/nv.py deleted file mode 100644 index f518c8b0027a..000000000000 --- a/numpy/distutils/fcompiler/nv.py +++ /dev/null @@ -1,53 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NVHPCFCompiler'] - -class NVHPCFCompiler(FCompiler): - """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler - - https://developer.nvidia.com/hpc-sdk - - Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, - https://www.pgroup.com/index.htm. - See also `numpy.distutils.fcompiler.pg`. - """ - - compiler_type = 'nv' - description = 'NVIDIA HPC SDK' - version_pattern = r'\s*(nvfortran|.+ \(aka nvfortran\)) (?P[\d.-]+).*' - - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["nvfortran"], - 'compiler_fix': ["nvfortran", "-Mfixed"], - 'compiler_f90': ["nvfortran"], - 'linker_so': [""], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='nv').get_version()) diff --git a/numpy/distutils/fcompiler/pathf95.py b/numpy/distutils/fcompiler/pathf95.py deleted file mode 100644 index 0768cb12e87a..000000000000 --- a/numpy/distutils/fcompiler/pathf95.py +++ /dev/null @@ -1,33 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['PathScaleFCompiler'] - -class PathScaleFCompiler(FCompiler): - - compiler_type = 'pathf95' - description = 'PathScale Fortran Compiler' - version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' - - executables = { - 'version_cmd' : ["pathf95", "-version"], - 'compiler_f77' : ["pathf95", "-fixedform"], - 'compiler_fix' : ["pathf95", "-fixedform"], - 'compiler_f90' : ["pathf95"], - 'linker_so' : ["pathf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
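`NoneFCompiler` above is a null object: every entry in `executables` is `None` and `find_executables` does nothing, so code that expects a compiler object can still run to completion when no Fortran compiler is available. A generic reduction of the pattern, independent of distutils:

```python
class NullCompiler:
    # Same shape as a real compiler class, but every tool is absent.
    executables = {name: None for name in
                   ('compiler_f77', 'compiler_f90', 'linker_so', 'ranlib')}

    def find_executables(self):
        pass  # nothing to probe on the filesystem

nc = NullCompiler()
nc.find_executables()
print(all(exe is None for exe in nc.executables.values()))  # True
```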
- module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py deleted file mode 100644 index 72442c4fec61..000000000000 --- a/numpy/distutils/fcompiler/pg.py +++ /dev/null @@ -1,128 +0,0 @@ -# http://www.pgroup.com -import sys - -from numpy.distutils.fcompiler import FCompiler -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] - - -class PGroupFCompiler(FCompiler): - - compiler_type = 'pg' - description = 'Portland Group Fortran Compiler' - version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' - - if platform == 'darwin': - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran", "-dynamiclib"], - 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], - 'compiler_f90': ["pgfortran", "-dynamiclib"], - 'linker_so': ["libtool"], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = [''] - else: - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran"], - 'compiler_fix': ["pgfortran", "-Mfixed"], - 'compiler_f90': ["pgfortran"], - 'linker_so': [""], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - if platform == 'darwin': - def get_flags_linker_so(self): - return ["-dynamic", '-undefined', 'dynamic_lookup'] - - else: - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - - -import functools - -class PGroupFlangCompiler(FCompiler): - compiler_type = 'flang' - description = 'Portland Group Fortran LLVM Compiler' - version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['flang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["flang"], - 'compiler_fix': ["flang"], - 'compiler_f90': ["flang"], - 'linker_so': [None], - 'archiver': [ar_exe, "/verbose", "/OUT:"], - 'ranlib': None - } - - library_switch = '/OUT:' # No space after /OUT:! - module_dir_switch = '-module ' # Don't remove ending space! 
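`PGroupFCompiler` above selects its `executables` and `pic_flags` with an `if platform == 'darwin':` written directly in the class body. A class body is ordinary code executed once at definition time, so the branch simply binds different class attributes; a toy reduction:

```python
from sys import platform

class Tool:
    # Evaluated once, when the class statement runs.
    if platform == 'darwin':
        linker_so = ['libtool']
        pic_flags = ['']
    else:
        linker_so = ['']
        pic_flags = ['-fpic']

print(Tool.linker_so, Tool.pic_flags)
```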
- - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - if 'flang' in sys.argv: - print(customized_fcompiler(compiler='flang').get_version()) - else: - print(customized_fcompiler(compiler='pg').get_version()) diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py deleted file mode 100644 index d039f0b25705..000000000000 --- a/numpy/distutils/fcompiler/sun.py +++ /dev/null @@ -1,51 +0,0 @@ -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler - -compilers = ['SunFCompiler'] - -class SunFCompiler(FCompiler): - - compiler_type = 'sun' - description = 'Sun or Forte Fortran 95 Compiler' - # ex: - # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 - version_match = simple_version_match( - start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["", "-Bdynamic", "-G"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = '-moddir=' - module_include_switch = '-M' - pic_flags = ['-xcode=pic32'] - - def get_flags_f77(self): - ret = ["-ftrap=%none"] - if (self.get_version() or '') >= '7': - ret.append("-f77") - else: - ret.append("-fixed") - return ret - def get_opt(self): - return ['-fast', '-dalign'] - def get_arch(self): - return ['-xtarget=generic'] - def get_libraries(self): - opt = [] - opt.extend(['fsu', 'sunmath', 'mvec']) - return opt - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='sun').get_version()) diff --git a/numpy/distutils/fcompiler/vast.py b/numpy/distutils/fcompiler/vast.py deleted file mode 100644 index 92a1647ba437..000000000000 --- a/numpy/distutils/fcompiler/vast.py +++ /dev/null @@ -1,52 +0,0 @@ -import os - -from numpy.distutils.fcompiler.gnu import GnuFCompiler - -compilers = ['VastFCompiler'] - -class VastFCompiler(GnuFCompiler): - compiler_type = 'vast' - compiler_aliases = () - description = 'Pacific-Sierra Research Fortran 90 Compiler' - version_pattern = (r'\s*Pacific-Sierra Research vf90 ' - r'(Personal|Professional)\s+(?P[^\s]*)') - - # VAST f90 does not support -o with -c. 
So, object files are created - # to the current directory and then moved to build directory - object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' - - executables = { - 'version_cmd' : ["vf90", "-v"], - 'compiler_f77' : ["g77"], - 'compiler_fix' : ["f90", "-Wv,-ya"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def find_executables(self): - pass - - def get_version_cmd(self): - f90 = self.compiler_f90[0] - d, b = os.path.split(f90) - vf90 = os.path.join(d, 'v'+b) - return vf90 - - def get_flags_arch(self): - vast_version = self.get_version() - gnu = GnuFCompiler() - gnu.customize(None) - self.version = gnu.get_version() - opt = GnuFCompiler.get_flags_arch(self) - self.version = vast_version - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='vast').get_version()) diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py deleted file mode 100644 index 90d1f4c384c7..000000000000 --- a/numpy/distutils/from_template.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env python3 -""" - -process_file(filename) - - takes templated file .xxx.src and produces .xxx file where .xxx - is .pyf .f90 or .f using the following template rules: - - '<..>' denotes a template. - - All function and subroutine blocks in a source file with names that - contain '<..>' will be replicated according to the rules in '<..>'. - - The number of comma-separated words in '<..>' will determine the number of - replicates. - - '<..>' may have two different forms, named and short. For example, - - named: - where anywhere inside a block '
<prefixp>
' will be replaced with - 'd', 's', 'z', and 'c' for each replicate of the block. - - <_c> is already defined: <_c=s,d,c,z> - <_t> is already defined: <_t=real,double precision,complex,double complex> - - short: - <d,s,z,c>, a short form of the named, useful when no
<p>
appears inside - a block. - - In general, '<..>' contains a comma separated list of arbitrary - expressions. If these expression must contain a comma|leftarrow|rightarrow, - then prepend the comma|leftarrow|rightarrow with a backslash. - - If an expression matches '\\' then it will be replaced - by -th expression. - - Note that all '<..>' forms in a block must have the same number of - comma-separated entries. - - Predefined named template rules: - - - - - - -""" -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) - -def parse_structure(astr): - """ Return a list of tuples for each function or subroutine each - tuple is the start and end of a subroutine or function to be - expanded. - """ - - spanlist = [] - ind = 0 - while True: - m = routine_start_re.search(astr, ind) - if m is None: - break - start = m.start() - if function_start_re.match(astr, start, m.end()): - while True: - i = astr.rfind('\n', ind, start) - if i==-1: - break - start = i - if astr[i:i+7]!='\n $': - break - start += 1 - m = routine_end_re.search(astr, m.end()) - ind = end = m and m.end()-1 or len(astr) - spanlist.append((start, end)) - return spanlist - -template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") -named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") -list_re = re.compile(r"<\s*((.*?))\s*>") - -def find_repl_patterns(astr): - reps = named_re.findall(astr) - names = {} - for rep in reps: - name = rep[0].strip() or unique_key(names) - repl = rep[1].replace(r'\,', '@comma@') - thelist = conv(repl) - names[name] = thelist - return names - -def find_and_remove_repl_patterns(astr): - names = find_repl_patterns(astr) - astr = re.subn(named_re, '', astr)[0] - return astr, names - -item_re = re.compile(r"\A\\(?P\d+)\Z") -def conv(astr): - b = astr.split(',') - l = [x.strip() for x in b] - for i in range(len(l)): - m = item_re.match(l[i]) - if m: - j = int(m.group('index')) - l[i] = l[j] - return ','.join(l) - -def unique_key(adict): - """ Obtain a unique key given a dictionary.""" - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = '__l%s' % (n) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr, names): - substr = substr.replace(r'\>', '@rightarrow@') - substr = substr.replace(r'\<', '@leftarrow@') - lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>", substr) # get rid of definition templates - - def listrepl(mobj): - thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) - if template_name_re.match(thelist): - return "<%s>" % (thelist) - name = None - for key in lnames.keys(): # see if list is already in dictionary - if lnames[key] == thelist: - name = key - if name is None: # this list is not in the dictionary yet - name = unique_key(lnames) - lnames[name] = thelist - return "<%s>" % name - - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed - - numsubs = None - base_rule = None - rules = {} - for r in template_re.findall(substr): - if r not in rules: - thelist = lnames.get(r, names.get(r, None)) - if thelist is None: - raise ValueError('No replicates found for <%s>' % (r)) - if r not in names and not thelist.startswith('_'): - names[r] 
= thelist - rule = [i.replace('@comma@', ',') for i in thelist.split(',')] - num = len(rule) - - if numsubs is None: - numsubs = num - rules[r] = rule - base_rule = r - elif num == numsubs: - rules[r] = rule - else: - print("Mismatch in number of replacements (base <%s=%s>)" - " for <%s=%s>. Ignoring." % - (base_rule, ','.join(rules[base_rule]), r, thelist)) - if not rules: - return substr - - def namerepl(mobj): - name = mobj.group(1) - return rules.get(name, (k+1)*[name])[k] - - newstr = '' - for k in range(numsubs): - newstr += template_re.sub(namerepl, substr) + '\n\n' - - newstr = newstr.replace('@rightarrow@', '>') - newstr = newstr.replace('@leftarrow@', '<') - return newstr - -def process_str(allstr): - newstr = allstr - writestr = '' - - struct = parse_structure(newstr) - - oldend = 0 - names = {} - names.update(_special_names) - for sub in struct: - cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) - writestr += cleanedstr - names.update(defs) - writestr += expand_sub(newstr[sub[0]:sub[1]], names) - oldend = sub[1] - writestr += newstr[oldend:] - - return writestr - -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - with open(source) as fid: - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d, fn) - if os.path.isfile(fn): - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - return lines - -def process_file(source): - lines = resolve_includes(source) - return process_str(''.join(lines)) - -_special_names = find_repl_patterns(''' -<_c=s,d,c,z> -<_t=real,double precision,complex,double complex> - - - - - -''') - -def main(): - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) - - -if __name__ == "__main__": - main() diff --git a/numpy/distutils/fujitsuccompiler.py b/numpy/distutils/fujitsuccompiler.py deleted file mode 100644 index c25900b34f1d..000000000000 --- a/numpy/distutils/fujitsuccompiler.py +++ /dev/null @@ -1,28 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class FujitsuCCompiler(UnixCCompiler): - - """ - Fujitsu compiler. 
- """ - - compiler_type = 'fujitsu' - cc_exe = 'fcc' - cxx_exe = 'FCC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables( - compiler=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_so=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -Nclang -fPIC', - linker_exe=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared', - linker_so=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared' - ) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py deleted file mode 100644 index 77fb39889a29..000000000000 --- a/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,106 +0,0 @@ -import platform - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.ccompiler import simple_version_match -if platform.system() == 'Windows': - from numpy.distutils.msvc9compiler import MSVCCompiler - - -class IntelCCompiler(UnixCCompiler): - """A modified Intel compiler compatible with a GCC-built Python.""" - compiler_type = 'intel' - cc_exe = 'icc' - cc_args = 'fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - cc_exe = 'icc' - - -class IntelEM64TCCompiler(UnixCCompiler): - """ - A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. - """ - compiler_type = 'intelem' - cc_exe = 'icc -m64' - cc_args = '-fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -if platform.system() == 'Windows': - class IntelCCompilerW(MSVCCompiler): - """ - A modified Intel compiler compatible with an MSVC-built Python. 
- """ - compiler_type = 'intelw' - compiler_cxx = 'icl' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?32,') - self.__version = version_match - - def initialize(self, plat_name=None): - MSVCCompiler.initialize(self, plat_name) - self.cc = self.find_exe('icl.exe') - self.lib = self.find_exe('xilib') - self.linker = self.find_exe('xilink') - self.compile_options = ['/nologo', '/O3', '/MD', '/W3', - '/Qstd=c99'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', - '/Qstd=c99', '/Z7', '/D_DEBUG'] - - class IntelEM64TCCompilerW(IntelCCompilerW): - """ - A modified Intel x86_64 compiler compatible with - a 64bit MSVC-built Python. - """ - compiler_type = 'intelemw' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - self.__version = version_match diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py deleted file mode 100644 index 851682c63310..000000000000 --- a/numpy/distutils/lib2def.py +++ /dev/null @@ -1,116 +0,0 @@ -import re -import sys -import subprocess - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). - -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = ['nm', '-Cs'] - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. - -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print("I'm assuming that your first argument is the library") - print("and the second is the DEF file.") - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" - p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, text=True) - nm_output, nm_err = p.communicate() - if p.returncode != 0: - raise RuntimeError('failed to run "%s": "%s"' % ( - ' '.join(nm_cmd), nm_err)) - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. 
- -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. - -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = DEFAULT_NM + [str(libfile)] - nm_output = getnm(nm_cmd, shell=False) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py deleted file mode 100644 index 686e5ebd937f..000000000000 --- a/numpy/distutils/line_endings.py +++ /dev/null @@ -1,77 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings - -""" -import os -import re -import sys - - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print('dos2unix:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def dos2unix_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, dos2unix_one_dir, modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." 
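Back in `lib2def`, the whole classification rests on the two module-level regexes: `FUNC_RE` grabs every symbol reported inside the Python DLL, while `DATA_RE` grabs only the `_imp__`-prefixed ones, and `parse_nm` reconciles the two lists. A self-contained sketch against fabricated nm output (the real module interpolates the running interpreter's version instead of the literal `39`):

```python
import re

FUNC_RE = re.compile(r"^(.*) in python39\.dll", re.MULTILINE)
DATA_RE = re.compile(r"^_imp__(.*) in python39\.dll", re.MULTILINE)

nm_output = ("PyList_New in python39.dll\n"
             "_imp__PyList_Type in python39.dll\n")
print(FUNC_RE.findall(nm_output))  # ['PyList_New', '_imp__PyList_Type']
print(DATA_RE.findall(nm_output))  # ['PyList_Type']
```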
- if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print('unix2dos:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def unix2dos_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, unix2dos_one_dir, modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py deleted file mode 100644 index 3347f56d6fe9..000000000000 --- a/numpy/distutils/log.py +++ /dev/null @@ -1,111 +0,0 @@ -# Colored log -import sys -from distutils.log import * # noqa: F403 -from distutils.log import Log as old_Log -from distutils.log import _global_log - -from numpy.distutils.misc_util import (red_text, default_text, cyan_text, - green_text, is_sequence, is_string) - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%', '%%') - if flag and is_sequence(args): - return tuple([_fix_args(a, flag=0) for a in args]) - return args - - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - msg = msg % _fix_args(args) - if 0: - if msg.startswith('copying ') and msg.find(' -> ') != -1: - return - if msg.startswith('byte-compiling '): - return - print(_global_color_map[level](msg)) - sys.stdout.flush() - - def good(self, msg, *args): - """ - If we log WARN messages, log this message as a 'nice' anti-warn - message. - - """ - if WARN >= self.threshold: - if args: - print(green_text(msg % _fix_args(args))) - else: - print(green_text(msg)) - sys.stdout.flush() - - -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. - _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting threshold to DEBUG level,' - ' it can be changed only with force argument') - else: - info('set_threshold: not changing threshold from DEBUG level' - ' %s to %s' % (prev_level, level)) - return prev_level - -def get_threshold(): - return _global_log.threshold - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) - - -_global_color_map = { - DEBUG:cyan_text, - INFO:default_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. 
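`set_verbosity` above maps the conventional `-v` count onto distutils log thresholds and returns the previous level re-encoded on the same scale. The mapping itself, reduced to plain integers so it runs without distutils:

```python
# Stand-ins for distutils.log's ERROR/WARN/INFO/DEBUG (values illustrative).
ERROR, WARN, INFO, DEBUG = 4, 3, 2, 1

def verbosity_to_threshold(v):
    if v < 0:
        return ERROR
    elif v == 0:
        return WARN
    elif v == 1:
        return INFO
    return DEBUG  # v >= 2

print([verbosity_to_threshold(v) for v in (-1, 0, 1, 3)])  # [4, 3, 2, 1]
```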
-set_verbosity(0, force=True) - - -_error = error -_warn = warn -_info = info -_debug = debug - - -def error(msg, *a, **kw): - _error(f"ERROR: {msg}", *a, **kw) - - -def warn(msg, *a, **kw): - _warn(f"WARN: {msg}", *a, **kw) - - -def info(msg, *a, **kw): - _info(f"INFO: {msg}", *a, **kw) - - -def debug(msg, *a, **kw): - _debug(f"DEBUG: {msg}", *a, **kw) diff --git a/numpy/distutils/mingw/gfortran_vs2003_hack.c b/numpy/distutils/mingw/gfortran_vs2003_hack.c deleted file mode 100644 index 485a675d8a1f..000000000000 --- a/numpy/distutils/mingw/gfortran_vs2003_hack.c +++ /dev/null @@ -1,6 +0,0 @@ -int _get_output_format(void) -{ - return 0; -} - -int _imp____lc_codepage = 0; diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index 944ba2d03b33..000000000000 --- a/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,620 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. Force windows to use g77 - -""" -import os -import sys -import subprocess -import re -import textwrap - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler # noqa: F401 -from numpy.distutils import log -# NT stuff -# 1. Make sure libpython.a exists for gcc. If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.unixccompiler import UnixCCompiler - -try: - from distutils.msvccompiler import get_build_version as get_build_msvc_version -except ImportError: - def get_build_msvc_version(): - return None - -from distutils.errors import UnknownFileError -from numpy.distutils.misc_util import (msvc_runtime_library, - msvc_runtime_version, - msvc_runtime_major, - get_build_architecture) - -def get_msvcr_replacement(): - """Replacement for outdated version of get_msvcr from cygwinccompiler""" - msvcr = msvc_runtime_library() - return [] if msvcr is None else [msvcr] - - -# Useful to generate table of symbols from a dll -_START = re.compile(r'\[Ordinal/Name Pointer\] Table') -_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. - - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, - dry_run, force) - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
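The final move in the deleted log module, just above, rebinds `error`/`warn`/`info`/`debug` to wrappers that prefix each message with its severity before delegating to the saved originals. The same decoration in isolation:

```python
def tagged(tag, fn):
    # Wrap fn so every message arrives prefixed with its severity.
    def wrapper(msg, *args, **kwargs):
        return fn(f"{tag}: {msg}", *args, **kwargs)
    return wrapper

info = tagged("INFO", print)
warn = tagged("WARN", print)
info("compiling sources")   # INFO: compiling sources
warn("missing .def file")   # WARN: missing .def file
```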
- msvcr_success = build_msvcr_library() - msvcr_dbg_success = build_msvcr_library(debug=True) - if msvcr_success or msvcr_dbg_success: - # add preprocessor statement for using customized msvcr lib - self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') - - # Define the MSVC version as hint for MinGW - msvcr_version = msvc_runtime_version() - if msvcr_version: - self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) - - # MS_WIN64 should be defined when building for amd64 on windows, - # but python headers define it only for MS compilers, which has all - # kind of bad consequences, like using Py_ModuleInit4 instead of - # Py_ModuleInit4_64, etc... So we add it here - if get_build_architecture() == 'AMD64': - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall ' - '-Wstrict-prototypes', - linker_exe='gcc -g', - linker_so='gcc -g -shared') - else: - self.set_executables( - compiler='gcc -O2 -Wall', - compiler_so='gcc -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ -shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished dlls - # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support - # thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropriate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - func = UnixCCompiler.link - func(*args[:func.__code__.co_argcount]) - return - - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv, base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def find_python_dll(): - # We can't do much here: - # - find it in the virtualenv (sys.prefix) - # - find it in python main dir (sys.base_prefix, if in a virtualenv) - # - in system32, - # - 
otherwise (Sxs), I don't know how to get it. - stems = [sys.prefix] - if sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - - sub_dirs = ['', 'lib', 'bin'] - # generate possible combinations of directory trees and sub-directories - lib_dirs = [] - for stem in stems: - for folder in sub_dirs: - lib_dirs.append(os.path.join(stem, folder)) - - # add system directory as well - if 'SYSTEMROOT' in os.environ: - lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) - - # search in the file system for possible candidates - major_version, minor_version = tuple(sys.version_info[:2]) - implementation = sys.implementation.name - if implementation == 'cpython': - dllname = f'python{major_version}{minor_version}.dll' - elif implementation == 'pypy': - dllname = f'libpypy{major_version}.{minor_version}-c.dll' - else: - dllname = f'Unknown platform {implementation}' - print("Looking for %s" % dllname) - for folder in lib_dirs: - dll = os.path.join(folder, dllname) - if os.path.exists(dll): - return dll - - raise ValueError("%s not found in %s" % (dllname, lib_dirs)) - -def dump_table(dll): - st = subprocess.check_output(["objdump.exe", "-p", dll]) - return st.split(b'\n') - -def generate_def(dll, dfile): - """Given a dll file location, get all its exported symbols and dump them - into the given def file. - - The .def file will be overwritten""" - dump = dump_table(dll) - for i in range(len(dump)): - if _START.match(dump[i].decode()): - break - else: - raise ValueError("Symbol table not found") - - syms = [] - for j in range(i+1, len(dump)): - m = _TABLE.match(dump[j].decode()) - if m: - syms.append((int(m.group(1).strip()), m.group(2))) - else: - break - - if len(syms) == 0: - log.warn('No symbols found in %s' % dll) - - with open(dfile, 'w') as d: - d.write('LIBRARY %s\n' % os.path.basename(dll)) - d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') - d.write(';DATA PRELOAD SINGLE\n') - d.write('\nEXPORTS\n') - for s in syms: - #d.write('@%d %s\n' % (s[0], s[1])) - d.write('%s\n' % s[1]) - -def find_dll(dll_name): - - arch = {'AMD64' : 'amd64', - 'ARM64' : 'arm64', - 'Intel' : 'x86'}[get_build_architecture()] - - def _find_dll_in_winsxs(dll_name): - # Walk through the WinSxS directory to find the dll. - winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), - 'winsxs') - if not os.path.exists(winsxs_path): - return None - for root, dirs, files in os.walk(winsxs_path): - if dll_name in files and arch in root: - return os.path.join(root, dll_name) - return None - - def _find_dll_in_path(dll_name): - # First, look in the Python directory, then scan PATH for - # the given dll name. - for path in [sys.prefix] + os.environ['PATH'].split(';'): - filepath = os.path.join(path, dll_name) - if os.path.exists(filepath): - return os.path.abspath(filepath) - - return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) - -def build_msvcr_library(debug=False): - if os.name != 'nt': - return False - - # If the version number is None, then we couldn't find the MSVC runtime at - # all, because we are running on a Python distribution which is customed - # compiled; trust that the compiler is the same as the one available to us - # now, and that it is capable of linking with the correct runtime without - # any extra options. 
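`generate_def` above performs a two-phase scan of `objdump -p` output: find the `[Ordinal/Name Pointer] Table` header, then collect `(ordinal, name)` pairs until the table pattern stops matching. A sketch of that scan over canned text, with the table regex tightened slightly for readability:

```python
import re

_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
_TABLE = re.compile(r'^\s+\[\s*([0-9]+)\] (\w+)')

dump = [
    " [Ordinal/Name Pointer] Table",
    "  [   1] PyInit_spam",
    "  [   2] spam_helper",
    "",  # table ends at the first non-matching line
]
syms, in_table = [], False
for line in dump:
    if _START.search(line):
        in_table = True
    elif in_table:
        m = _TABLE.match(line)
        if not m:
            break
        syms.append((int(m.group(1)), m.group(2)))
print(syms)  # [(1, 'PyInit_spam'), (2, 'spam_helper')]
```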
- msvcr_ver = msvc_runtime_major() - if msvcr_ver is None: - log.debug('Skip building import library: ' - 'Runtime is not compiled with MSVC') - return False - - # Skip using a custom library for versions < MSVC 8.0 - if msvcr_ver < 80: - log.debug('Skip building msvcr library:' - ' custom functionality not present') - return False - - msvcr_name = msvc_runtime_library() - if debug: - msvcr_name += 'd' - - # Skip if custom library already exists - out_name = "lib%s.a" % msvcr_name - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building msvcr library: "%s" exists' % - (out_file,)) - return True - - # Find the msvcr dll - msvcr_dll_name = msvcr_name + '.dll' - dll_file = find_dll(msvcr_dll_name) - if not dll_file: - log.warn('Cannot build msvcr library: "%s" not found' % - msvcr_dll_name) - return False - - def_name = "lib%s.def" % msvcr_name - def_file = os.path.join(sys.prefix, 'libs', def_name) - - log.info('Building msvcr library: "%s" (from %s)' \ - % (out_file, dll_file)) - - # Generate a symbol definition file from the msvcr dll - generate_def(dll_file, def_file) - - # Create a custom mingw library for the given symbol definitions - cmd = ['dlltool', '-d', def_file, '-l', out_file] - retcode = subprocess.call(cmd) - - # Clean up symbol definitions - os.remove(def_file) - - return (not retcode) - -def build_import_library(): - if os.name != 'nt': - return - - arch = get_build_architecture() - if arch == 'AMD64': - return _build_import_library_amd64() - if arch == 'ARM64': - return _build_import_library_arm64() - elif arch == 'Intel': - return _build_import_library_x86() - else: - raise ValueError("Unhandled arch %s" % arch) - -def _check_for_import_lib(): - """Check if an import library for the Python runtime already exists.""" - major_version, minor_version = tuple(sys.version_info[:2]) - - # patterns for the file name of the library itself - patterns = ['libpython%d%d.a', - 'libpython%d%d.dll.a', - 'libpython%d.%d.dll.a'] - - # directory trees that may contain the library - stems = [sys.prefix] - if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: - stems.append(sys.real_prefix) - - # possible subdirectories within those trees where it is placed - sub_dirs = ['libs', 'lib'] - - # generate a list of candidate locations - candidates = [] - for pat in patterns: - filename = pat % (major_version, minor_version) - for stem_dir in stems: - for folder in sub_dirs: - candidates.append(os.path.join(stem_dir, folder, filename)) - - # test the filesystem to see if we can find any of these - for fullname in candidates: - if os.path.isfile(fullname): - # already exists, in location given - return (True, fullname) - - # needs to be built, preferred location given first - return (False, candidates[0]) - -def _build_import_library_amd64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=AMD64): "%s" (from %s)' % - (out_file, dll_file)) - - # generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', 
'-d', def_file, '-l', out_file] - subprocess.check_call(cmd) - -def _build_import_library_arm64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=ARM64): "%s" (from %s)' % - (out_file, dll_file)) - - # generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.check_call(cmd) - -def _build_import_library_x86(): - """ Build the import libraries for Mingw32-gcc on Windows - """ - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix, 'libs', lib_name) - if not os.path.isfile(lib_file): - # didn't find library file in virtualenv, try base distribution, too, - # and use that instead if found there. for Python 2.7 venvs, the base - # directory is in attribute real_prefix instead of base_prefix. - if hasattr(sys, 'base_prefix'): - base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) - elif hasattr(sys, 'real_prefix'): - base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) - else: - base_lib = '' # os.path.isfile('') == False - - if os.path.isfile(base_lib): - lib_file = base_lib - else: - log.warn('Cannot build import library: "%s" not found', lib_file) - return - log.info('Building import library (ARCH=x86): "%s"', out_file) - - from numpy.distutils import lib2def - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - nm_output = lib2def.getnm( - lib2def.DEFAULT_NM + [lib_file], shell=False) - dlist, flist = lib2def.parse_nm(nm_output) - with open(def_file, 'w') as fid: - lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) - - dll_name = find_python_dll () - - cmd = ["dlltool", - "--dllname", dll_name, - "--def", def_file, - "--output-lib", out_file] - status = subprocess.check_output(cmd) - if status: - log.warn('Failed to build import library for gcc. Linking will fail.') - return - -#===================================== -# Dealing with Visual Studio MANIFESTS -#===================================== - -# Functions to deal with visual studio manifests. Manifest are a mechanism to -# enforce strong DLL versioning on windows, and has nothing to do with -# distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL not in the -# system path; in particular, official python 2.6 binary is built against the -# MS runtime 9 (the one from VS 2008), which is not available on most windows -# systems; python 2.6 installer does install it in the Win SxS (Side by side) -# directory, but this requires the manifest for this to work. This is a big -# mess, thanks MS for a wonderful system. - -# XXX: ideally, we should use exactly the same version as used by python. I -# submitted a patch to get this version, but it was only included for python -# 2.6.1 and above. So for versions below, we use a "best guess". 
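`_check_for_import_lib` above probes the cross product of three filename patterns, the prefix directories, and the `libs`/`lib` subfolders, returning either an existing library or the preferred location to build one. The candidate generation is portable enough to run anywhere:

```python
import os
import sys

patterns = ['libpython%d%d.a', 'libpython%d%d.dll.a', 'libpython%d.%d.dll.a']
stems = [sys.prefix]
if sys.base_prefix != sys.prefix:  # running inside a virtualenv
    stems.append(sys.base_prefix)

major, minor = sys.version_info[:2]
candidates = [os.path.join(stem, sub, pat % (major, minor))
              for pat in patterns for stem in stems for sub in ('libs', 'lib')]
found = next((c for c in candidates if os.path.isfile(c)), None)
print(found or candidates[0])  # existing library, else preferred target
```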
-_MSVCRVER_TO_FULLVER = {}
-if sys.platform == 'win32':
-    try:
-        import msvcrt
-        # I took one version in my SxS directory: no idea if it is the good
-        # one, and we can't retrieve it from python
-        _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
-        _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
-        # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
-        # on Windows XP:
-        _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
-        crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None)
-        if crt_ver is not None:  # Available at least back to Python 3.3
-            maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups()
-            _MSVCRVER_TO_FULLVER[maj + min] = crt_ver
-            del maj, min
-        del crt_ver
-    except ImportError:
-        # If we are here, means python was not built with MSVC. Not sure what
-        # to do in that case: manifest building will fail, but it should not be
-        # used in that case anyway
-        log.warn('Cannot import msvcrt: using manifest will not be possible')
-
-def msvc_manifest_xml(maj, min):
-    """Given a major and minor version of the MSVCR, returns the
-    corresponding XML file."""
-    try:
-        fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
-    except KeyError:
-        raise ValueError("Version %d,%d of MSVCRT not supported yet" %
-                         (maj, min)) from None
-    # Don't be fooled, it looks like an XML, but it is not. In particular, it
-    # should not have any space before starting, and its size should be
-    # divisible by 4, most likely for alignment constraints when the xml is
-    # embedded in the binary...
-    # This template was copied directly from the python 2.6 binary (using
-    # strings.exe from mingw on python.exe).
-    template = textwrap.dedent("""\
-        <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-        <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
-          <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
-            <security>
-              <requestedPrivileges>
-                <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
-              </requestedPrivileges>
-            </security>
-          </trustInfo>
-          <dependency>
-            <dependentAssembly>
-              <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
-            </dependentAssembly>
-          </dependency>
-        </assembly>""")
-
-    return template % {'fullver': fullver, 'maj': maj, 'min': min}
-
-def manifest_rc(name, type='dll'):
-    """Return the rc file used to generate the res file which will be embedded
-    as manifest for given manifest file name, of given type ('dll' or
-    'exe').
- - Parameters - ---------- - name : str - name of the manifest file to embed - type : str {'dll', 'exe'} - type of the binary which will embed the manifest - - """ - if type == 'dll': - rctype = 2 - elif type == 'exe': - rctype = 1 - else: - raise ValueError("Type %s not supported" % type) - - return """\ -#include "winuser.h" -%d RT_MANIFEST %s""" % (rctype, name) - -def check_embedded_msvcr_match_linked(msver): - """msver is the ms runtime version used for the MANIFEST.""" - # check msvcr major version are the same for linking and - # embedding - maj = msvc_runtime_major() - if maj: - if not maj == int(msver): - raise ValueError( - "Discrepancy between linked msvcr " \ - "(%d) and the one about to be embedded " \ - "(%d)" % (int(msver), maj)) - -def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) - return os.path.splitext(base)[0] - -def manifest_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - exext = config.compiler.exe_extension - return root + exext + ".manifest" - -def rc_name(config): - # Get configtest name (including suffix) - root = configtest_name(config) - return root + ".rc" - -def generate_manifest(config): - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma_str, mi_str = str(msver).split('.') - # Write the manifest file - manxml = msvc_manifest_xml(int(ma_str), int(mi_str)) - with open(manifest_name(config), "w") as man: - config.temp_files.append(manifest_name(config)) - man.write(manxml) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py deleted file mode 100644 index 09145e1ddf52..000000000000 --- a/numpy/distutils/misc_util.py +++ /dev/null @@ -1,2484 +0,0 @@ -import os -import re -import sys -import copy -import glob -import atexit -import tempfile -import subprocess -import shutil -import multiprocessing -import textwrap -import importlib.util -from threading import local as tlocal -from functools import reduce - -import distutils -from distutils.errors import DistutilsError - -# stores temporary directory of each thread to only create one per thread -_tdata = tlocal() - -# store all created temporary directories so they can be deleted on exit -_tmpdirs = [] -def clean_up_temporary_directory(): - if _tmpdirs is not None: - for d in _tmpdirs: - try: - shutil.rmtree(d) - except OSError: - pass - -atexit.register(clean_up_temporary_directory) - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath', 'njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'get_build_architecture', 'get_info', 'get_pkg_info', - 'get_num_build_jobs', 'sanitize_cxx_flags', - 'exec_mod_from_location'] - -class InstallableLib: - """ - Container to hold information on an installable library. - - Parameters - ---------- - name : str - Name of the installed library. - build_info : dict - Dictionary holding build information. - target_dir : str - Absolute path specifying where to install the library. 
- - See Also - -------- - Configuration.add_installed_library - - Notes - ----- - The three parameters are stored as attributes with the same names. - - """ - def __init__(self, name, build_info, target_dir): - self.name = name - self.build_info = build_info - self.target_dir = target_dir - - -def get_num_build_jobs(): - """ - Get number of parallel build jobs set by the --parallel command line - argument of setup.py - If the command did not receive a setting the environment variable - NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of - processors on the system, with a maximum of 8 (to prevent - overloading the system if there a lot of CPUs). - - Returns - ------- - out : int - number of parallel jobs that can be run - - """ - from numpy.distutils.core import get_distribution - try: - cpu_count = len(os.sched_getaffinity(0)) - except AttributeError: - cpu_count = multiprocessing.cpu_count() - cpu_count = min(cpu_count, 8) - envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) - dist = get_distribution() - # may be None during configuration - if dist is None: - return envjobs - - # any of these three may have the job set, take the largest - cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), - getattr(dist.get_command_obj('build_ext'), 'parallel', None), - getattr(dist.get_command_obj('build_clib'), 'parallel', None)) - if all(x is None for x in cmdattr): - return envjobs - else: - return max(x for x in cmdattr if x is not None) - -def quote_args(args): - """Quote list of arguments. - - .. deprecated:: 1.22. - """ - import warnings - warnings.warn('"quote_args" is deprecated.', - DeprecationWarning, stacklevel=2) - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. - args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." - split = name.split('/') - return os.path.join(*split) - -def rel_path(path, parent_path): - """Return path relative to parent_path.""" - # Use realpath to avoid issues with symlinked dirs (see gh-7707) - pd = os.path.realpath(os.path.abspath(parent_path)) - apath = os.path.realpath(os.path.abspath(path)) - if len(apath) < len(pd): - return path - if apath == pd: - return '' - if pd == apath[:len(pd)]: - assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) - path = apath[len(pd)+1:] - return path - -def get_path_from_frame(frame, parent_path=None): - """Return path of the module given a frame object from the call stack. - - Returned path is relative to parent_path when given, - otherwise it is absolute path. - """ - - # First, try to find if the file name is in the frame. - try: - caller_file = eval('__file__', frame.f_globals, frame.f_locals) - d = os.path.dirname(os.path.abspath(caller_file)) - except NameError: - # __file__ is not defined, so let's try __name__. We try this second - # because setuptools spoofs __name__ to be '__main__' even though - # sys.modules['__main__'] might be something else, like easy_install(1). 
- caller_name = eval('__name__', frame.f_globals, frame.f_locals) - __import__(caller_name) - mod = sys.modules[caller_name] - if hasattr(mod, '__file__'): - d = os.path.dirname(os.path.abspath(mod.__file__)) - else: - # we're probably running setup.py as execfile("setup.py") - # (likely we're building an egg) - d = os.path.abspath('.') - - if parent_path is not None: - d = rel_path(d, parent_path) - - return d or '.' - -def njoin(*path): - """Join two or more pathname components + - - convert a /-separated pathname to one using the OS's path separator. - - resolve `..` and `.` from path. - - Either passing n arguments as in njoin('a','b'), or a sequence - of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. - """ - paths = [] - for p in path: - if is_sequence(p): - # njoin(['a', 'b'], 'c') - paths.append(njoin(*p)) - else: - assert is_string(p) - paths.append(p) - path = paths - if not path: - # njoin() - joined = '' - else: - # njoin('a', 'b') - joined = os.path.join(*path) - if os.path.sep != '/': - joined = joined.replace('/', os.path.sep) - return minrelpath(joined) - -def get_mathlibs(path=None): - """Return the MATHLIB line from numpyconfig.h - """ - if path is not None: - config_file = os.path.join(path, '_numpyconfig.h') - else: - # Look for the file in each of the numpy include directories. - dirs = get_numpy_include_dirs() - for path in dirs: - fn = os.path.join(path, '_numpyconfig.h') - if os.path.exists(fn): - config_file = fn - break - else: - raise DistutilsError('_numpyconfig.h not found in numpy include ' - 'dirs %r' % (dirs,)) - - with open(config_file) as fid: - mathlibs = [] - s = '#define MATHLIB' - for line in fid: - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - return mathlibs - -def minrelpath(path): - """Resolve `..` and '.' from path. - """ - if not is_string(path): - return path - if '.' not in path: - return path - l = path.split(os.sep) - while l: - try: - i = l.index('.', 1) - except ValueError: - break - del l[i] - j = 1 - while l: - try: - i = l.index('..', j) - except ValueError: - break - if l[i-1]=='..': - j += 1 - else: - del l[i], l[i-1] - j = 1 - if not l: - return '' - return os.sep.join(l) - -def sorted_glob(fileglob): - """sorts output of python glob for https://bugs.python.org/issue30461 - to allow extensions to have reproducible build results""" - return sorted(glob.glob(fileglob)) - -def _fix_paths(paths, local_path, include_non_existing): - assert is_sequence(paths), repr(type(paths)) - new_paths = [] - assert not is_string(paths), repr(paths) - for n in paths: - if is_string(n): - if '*' in n or '?' in n: - p = sorted_glob(n) - p2 = sorted_glob(njoin(local_path, n)) - if p2: - new_paths.extend(p2) - elif p: - new_paths.extend(p) - else: - if include_non_existing: - new_paths.append(n) - print('could not resolve pattern in %r: %r' % - (local_path, n)) - else: - n2 = njoin(local_path, n) - if os.path.exists(n2): - new_paths.append(n2) - else: - if os.path.exists(n): - new_paths.append(n) - elif include_non_existing: - new_paths.append(n) - if not os.path.exists(n): - print('non-existing path in %r: %r' % - (local_path, n)) - - elif is_sequence(n): - new_paths.extend(_fix_paths(n, local_path, include_non_existing)) - else: - new_paths.append(n) - return [minrelpath(p) for p in new_paths] - -def gpaths(paths, local_path='', include_non_existing=True): - """Apply glob to paths and prepend local_path if needed. 
- """ - if is_string(paths): - paths = (paths,) - return _fix_paths(paths, local_path, include_non_existing) - -def make_temp_file(suffix='', prefix='', text=True): - if not hasattr(_tdata, 'tempdir'): - _tdata.tempdir = tempfile.mkdtemp() - _tmpdirs.append(_tdata.tempdir) - fid, name = tempfile.mkstemp(suffix=suffix, - prefix=prefix, - dir=_tdata.tempdir, - text=text) - fo = os.fdopen(fid, 'w') - return fo, name - -# Hooks for colored terminal output. -# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle -def terminal_has_colors(): - if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: - # Avoid importing curses that causes illegal operation - # with a message: - # PYTHON2 caused an invalid page fault in - # module CYGNURSES7.DLL as 015f:18bbfc28 - # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] - # ssh to Win32 machine from debian - # curses.version is 2.2 - # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) - return 0 - if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): - try: - import curses - curses.setupterm() - if (curses.tigetnum("colors") >= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7, default=9) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(bg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def default_text(s): - return colour_text(s, 'default') -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path: str) -> str: - """Convert a path from Cygwin-native to Windows-native. - - Uses the cygpath utility (part of the Base install) to do the - actual conversion. Falls back to returning the original path if - this fails. - - Handles the default ``/cygdrive`` mount prefix as well as the - ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such - as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or - ``/home/username`` - - Parameters - ---------- - path : str - The path to convert - - Returns - ------- - converted_path : str - The converted path - - Notes - ----- - Documentation for cygpath utility: - https://cygwin.com/cygwin-ug-net/cygpath.html - Documentation for the C function it wraps: - https://cygwin.com/cygwin-api/func-cygwin-conv-path.html - - """ - if sys.platform != "cygwin": - return path - return subprocess.check_output( - ["/usr/bin/cygpath", "--windows", path], text=True - ) - - -def mingw32(): - """Return true when using mingw32 environment. 
- """ - if sys.platform=='win32': - if os.environ.get('OSTYPE', '')=='msys': - return True - if os.environ.get('MSYSTEM', '')=='MINGW32': - return True - return False - -def msvc_runtime_version(): - "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) - else: - msc_ver = None - return msc_ver - -def msvc_runtime_library(): - "Return name of MSVC runtime library if Python was built with MSVC >= 7" - ver = msvc_runtime_major () - if ver: - if ver < 140: - return "msvcr%i" % ver - else: - return "vcruntime%i" % ver - else: - return None - -def msvc_runtime_major(): - "Return major version of MSVC runtime coded like get_build_msvc_version" - major = {1300: 70, # MSVC 7.0 - 1310: 71, # MSVC 7.1 - 1400: 80, # MSVC 8 - 1500: 90, # MSVC 9 (aka 2008) - 1600: 100, # MSVC 10 (aka 2010) - 1900: 140, # MSVC 14 (aka 2015) - }.get(msvc_runtime_version(), None) - return major - -######################### - -#XXX need support for .C that is also C++ -cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match -fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match -f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match -def _get_f90_modules(source): - """Return a list of Fortran f90 module names that - given source file defines. - """ - if not f90_ext_match(source): - return [] - modules = [] - with open(source) as f: - for line in f: - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? - return modules - -def is_string(s): - return isinstance(s, str) - -def all_strings(lst): - """Return True if all items in lst are string objects. """ - return all(is_string(item) for item in lst) - -def is_sequence(seq): - if is_string(seq): - return False - try: - len(seq) - except Exception: - return False - return True - -def is_glob_pattern(s): - return is_string(s) and ('*' in s or '?' in s) - -def as_list(seq): - if is_sequence(seq): - return list(seq) - else: - return [seq] - -def get_language(sources): - # not used in numpy/scipy packages, use build_ext.detect_language instead - """Determine language value (c,f77,f90) from sources """ - language = None - for source in sources: - if isinstance(source, str): - if f90_ext_match(source): - language = 'f90' - break - elif fortran_ext_match(source): - language = 'f77' - return language - -def has_f_sources(sources): - """Return True if sources contains Fortran files """ - return any(fortran_ext_match(source) for source in sources) - -def has_cxx_sources(sources): - """Return True if sources contains C++ files """ - return any(cxx_ext_match(source) for source in sources) - -def filter_sources(sources): - """Return four lists of filenames containing - C, C++, Fortran, and Fortran 90 module sources, - respectively. 
- """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. - direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def _commandline_dep_string(cc_args, extra_postargs, pp_opts): - """ - Return commandline representation used to determine if a file needs - to be recompiled - """ - cmdline = 'commandline: ' - cmdline += ' '.join(cc_args) - cmdline += ' '.join(extra_postargs) - cmdline += ' '.join(pp_opts) + '\n' - return cmdline - - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. - """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(), abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. - """ - pruned_directories = ['CVS', '.svn', 'build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath, f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. 
- filenames = [] - sources = [_m for _m in ext.sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = [_m for _m in scripts if is_string(_m)] - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources', []) - sources = [_m for _m in sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends', []) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_shared_lib_extension(is_python_ext=False): - """Return the correct file extension for shared libraries. - - Parameters - ---------- - is_python_ext : bool, optional - Whether the shared library is a Python extension. Default is False. - - Returns - ------- - so_ext : str - The shared library extension. - - Notes - ----- - For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, - and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on - POSIX systems according to PEP 3149. - - """ - confvars = distutils.sysconfig.get_config_vars() - so_ext = confvars.get('EXT_SUFFIX', '') - - if not is_python_ext: - # hardcode known values, config vars (including SHLIB_SUFFIX) are - # unreliable (see #3182) - # darwin, windows and debug linux are wrong in 3.3.1 and older - if (sys.platform.startswith('linux') or - sys.platform.startswith('gnukfreebsd')): - so_ext = '.so' - elif sys.platform.startswith('darwin'): - so_ext = '.dylib' - elif sys.platform.startswith('win'): - so_ext = '.dll' - else: - # fall back to config vars for unknown platforms - # fix long extension for Python >=3.2, see PEP 3149. - if 'SOABI' in confvars: - # Does nothing unless SOABI config var exists - so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) - - return so_ext - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if hasattr(s, '__call__'): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print('Not existing data file:', s) - else: - raise TypeError(repr(s)) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. - """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - - -###################### - -class Configuration: - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', - 'installed_libraries', 'define_macros'] - _dict_keys = ['package_dir', 'installed_pkg_config'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - setup_name='setup.py', - **attrs): - """Construct configuration instance of a package. 
- - package_name -- name of the package - Ex.: 'distutils' - parent_name -- name of the parent package - Ex.: 'numpy' - top_path -- directory of the toplevel package - Ex.: the directory where the numpy package source sits - package_path -- directory of package. Will be computed by magic from the - directory of the caller module if not specified - Ex.: the directory where numpy.distutils is - caller_level -- frame level to caller namespace, internal parameter. - """ - self.name = dot_join(parent_name, package_name) - self.version = None - - caller_frame = get_frame(caller_level) - self.local_path = get_path_from_frame(caller_frame, top_path) - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - if top_path is None: - top_path = self.local_path - self.local_path = '' - if package_path is None: - package_path = self.local_path - elif os.path.isdir(njoin(self.local_path, package_path)): - package_path = njoin(self.local_path, package_path) - if not os.path.isdir(package_path or '.'): - raise ValueError("%r is not a directory" % (package_path,)) - self.top_path = top_path - self.package_path = package_path - # this is the relative path in the installed package - self.path_in_package = os.path.join(*self.name.split('.')) - - self.list_keys = self._list_keys[:] - self.dict_keys = self._dict_keys[:] - - for n in self.list_keys: - v = copy.copy(attrs.get(n, [])) - setattr(self, n, as_list(v)) - - for n in self.dict_keys: - v = copy.copy(attrs.get(n, {})) - setattr(self, n, v) - - known_keys = self.list_keys + self.dict_keys - self.extra_keys = self._extra_keys[:] - for n in attrs.keys(): - if n in known_keys: - continue - a = attrs[n] - setattr(self, n, a) - if isinstance(a, list): - self.list_keys.append(n) - elif isinstance(a, dict): - self.dict_keys.append(n) - else: - self.extra_keys.append(n) - - if os.path.exists(njoin(package_path, '__init__.py')): - self.packages.append(self.name) - self.package_dir[self.name] = package_path - - self.options = dict( - ignore_setup_xxx_py = False, - assume_default_configuration = False, - delegate_options_to_subpackages = False, - quiet = False, - ) - - caller_instance = None - for i in range(1, 3): - try: - f = get_frame(i) - except ValueError: - break - try: - caller_instance = eval('self', f.f_globals, f.f_locals) - break - except NameError: - pass - if isinstance(caller_instance, self.__class__): - if caller_instance.options['delegate_options_to_subpackages']: - self.set_options(**caller_instance.options) - - self.setup_name = setup_name - - def todict(self): - """ - Return a dictionary compatible with the keyword arguments of distutils - setup function. - - Examples - -------- - >>> setup(**config.todict()) #doctest: +SKIP - """ - - self._optimize_data_files() - d = {} - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for n in known_keys: - a = getattr(self, n) - if a: - d[n] = a - return d - - def info(self, message): - if not self.options['quiet']: - print(message) - - def warn(self, message): - sys.stderr.write('Warning: %s\n' % (message,)) - - def set_options(self, **options): - """ - Configure Configuration instance. 
- - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError('Unknown option: '+key) - - def get_distribution(self): - """Return the distutils distribution object for self.""" - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d, '__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case setup_py imports local modules: - sys.path.insert(0, os.path.dirname(setup_py)) - try: - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name, subpackage_name, setup_name) - setup_module = exec_mod_from_location( - '_'.join(n.split('.')), setup_py) - if not hasattr(setup_module, 'configuration'): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name, subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name, subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - Parameters - ---------- - subpackage_name : str or None - Name of the subpackage to get the configuration. '*' in - subpackage_name is handled as a wildcard. - subpackage_path : str - If None, then the path is assumed to be the local path plus the - subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. - parent_name : str - Parent name. 
- """ - if subpackage_name is None: - if subpackage_path is None: - raise ValueError( - "either subpackage_name or subpackage_path must be specified") - subpackage_name = os.path.basename(subpackage_path) - - # handle wildcards - l = subpackage_name.split('.') - if subpackage_path is None and '*' in subpackage_name: - return self._wildcard_get_subpackage(subpackage_name, - parent_name, - caller_level = caller_level+1) - assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) - if subpackage_path is None: - subpackage_path = njoin([self.local_path] + l) - else: - subpackage_path = njoin([subpackage_path] + l[:-1]) - subpackage_path = self.paths([subpackage_path])[0] - setup_py = njoin(subpackage_path, self.setup_name) - if not self.options['ignore_setup_xxx_py']: - if not os.path.isfile(setup_py): - setup_py = njoin(subpackage_path, - 'setup_%s.py' % (subpackage_name)) - if not os.path.isfile(setup_py): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s/{setup_%s,setup}.py was not found)' \ - % (os.path.dirname(setup_py), subpackage_name)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level+1) - else: - config = self._get_configuration_from_setup_py( - setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = caller_level + 1) - if config: - return [config] - else: - return [] - - def add_subpackage(self,subpackage_name, - subpackage_path=None, - standalone = False): - """Add a sub-package to the current Configuration instance. - - This is useful in a setup.py script for adding sub-packages to a - package. - - Parameters - ---------- - subpackage_name : str - name of the subpackage - subpackage_path : str - if given, the subpackage path such as the subpackage is in - subpackage_path / subpackage_name. If None,the subpackage is - assumed to be located in the local path / subpackage_name. - standalone : bool - """ - - if standalone: - parent_name = None - else: - parent_name = self.name - config_list = self.get_subpackage(subpackage_name, subpackage_path, - parent_name = parent_name, - caller_level = 2) - if not config_list: - self.warn('No configuration returned, assuming unavailable.') - for config in config_list: - d = config - if isinstance(config, Configuration): - d = config.todict() - assert isinstance(d, dict), repr(type(d)) - - self.info('Appending %s configuration to %s' \ - % (d.get('name'), self.name)) - self.dict_append(**d) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - - def add_data_dir(self, data_path): - """Recursively add files under data_path to data_files list. - - Recursively add files under data_path to the list of data_files to be - installed (and distributed). The data_path can be either a relative - path-name, or an absolute path-name, or a 2-tuple where the first - argument shows where in the install directory the data directory - should be installed to. - - Parameters - ---------- - data_path : seq or str - Argument can be either - - * 2-sequence (, ) - * path to data directory where python datadir suffix defaults - to package dir. 
- - Notes - ----- - Rules for installation paths:: - - foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar - (gun, foo/bar) -> parent/gun - foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b - (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun - (gun/*, foo/*) -> parent/gun/a, parent/gun/b - /foo/bar -> (bar, /foo/bar) -> parent/bar - (gun, /foo/bar) -> parent/gun - (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar - - Examples - -------- - For example suppose the source directory contains fun/foo.dat and - fun/bar/car.dat: - - >>> self.add_data_dir('fun') #doctest: +SKIP - >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP - >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP - - Will install data-files to the locations:: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - car.dat - - """ - if is_sequence(data_path): - d, data_path = data_path - else: - d = None - if is_sequence(data_path): - [self.add_data_dir((d, p)) for p in data_path] - return - if not is_string(data_path): - raise TypeError("not a string: %r" % (data_path,)) - if d is None: - if os.path.isabs(data_path): - return self.add_data_dir((os.path.basename(data_path), data_path)) - return self.add_data_dir((data_path, data_path)) - paths = self.paths(data_path, include_non_existing=False) - if is_glob_pattern(data_path): - if is_glob_pattern(d): - pattern_list = allpath(d).split(os.sep) - pattern_list.reverse() - # /a/*//b/ -> /a/*/b - rl = list(range(len(pattern_list)-1)); rl.reverse() - for i in rl: - if not pattern_list[i]: - del pattern_list[i] - # - for path in paths: - if not os.path.isdir(path): - print('Not a directory, skipping', path) - continue - rpath = rel_path(path, self.local_path) - path_list = rpath.split(os.sep) - path_list.reverse() - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - if i>=len(path_list): - raise ValueError('cannot fill pattern %r with %r' \ - % (d, path)) - target_list.append(path_list[i]) - else: - assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) - target_list.append(s) - i += 1 - if path_list[i:]: - self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list, path_list)) - target_list.reverse() - self.add_data_dir((os.sep.join(target_list), path)) - else: - for path in paths: - self.add_data_dir((d, path)) - return - assert not is_glob_pattern(d), repr(d) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - for path in paths: - for d1, f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package, d, d1) - data_files.append((target_path, f)) - - def _optimize_data_files(self): - data_dict = {} - for p, files in self.data_files: - if p not in data_dict: - data_dict[p] = set() - for f in files: - data_dict[p].add(f) - self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] - - def add_data_files(self,*files): - """Add data files to configuration data_files. - - Parameters - ---------- - files : sequence - Argument(s) can be either - - * 2-sequence (,) - * paths to data files where python datadir prefix defaults - to package dir. - - Notes - ----- - The form of each element of the files sequence is very flexible - allowing many combinations of where to get the files from the package - and where they should ultimately be installed on the system. 
The most - basic usage is for an element of the files argument sequence to be a - simple filename. This will cause that file from the local path to be - installed to the installation path of the self.name package (package - path). The file argument can also be a relative path in which case the - entire relative path will be installed into the package directory. - Finally, the file can be an absolute path name in which case the file - will be found at the absolute path name but installed to the package - path. - - This basic behavior can be augmented by passing a 2-tuple in as the - file argument. The first element of the tuple should specify the - relative path (under the package install directory) where the - remaining sequence of files should be installed to (it has nothing to - do with the file-names in the source distribution). The second element - of the tuple is the sequence of files that should be installed. The - files in this sequence can be filenames, relative paths, or absolute - paths. For absolute paths the file will be installed in the top-level - package installation directory (regardless of the first argument). - Filenames and relative path names will be installed in the package - install directory under the path name given as the first element of - the tuple. - - Rules for installation paths: - - #. file.txt -> (., file.txt)-> parent/file.txt - #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - #. ``*``.txt -> parent/a.txt, parent/b.txt - #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt - #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt - #. (sun, file.txt) -> parent/sun/file.txt - #. (sun, bar/file.txt) -> parent/sun/file.txt - #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt - #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt - - An additional feature is that the path to a data-file can actually be - a function that takes no arguments and returns the actual path(s) to - the data-files. This is useful when the data files are generated while - building the package. - - Examples - -------- - Add files to the list of data_files to be included with the package. - - >>> self.add_data_files('foo.dat', - ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), - ... 'bar/cat.dat', - ... '/full/path/to/can.dat') #doctest: +SKIP - - will install these data files to:: - - / - foo.dat - fun/ - gun.dat - nun/ - pun.dat - sun.dat - bar/ - car.dat - can.dat - - where is the package (or sub-package) - directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage') or - '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). 
- """ - - if len(files)>1: - for f in files: - self.add_data_files(f) - return - assert len(files)==1 - if is_sequence(files[0]): - d, files = files[0] - else: - d = None - if is_string(files): - filepat = files - elif is_sequence(files): - if len(files)==1: - filepat = files[0] - else: - for f in files: - self.add_data_files((d, f)) - return - else: - raise TypeError(repr(type(files))) - - if d is None: - if hasattr(filepat, '__call__'): - d = '' - elif os.path.isabs(filepat): - d = '' - else: - d = os.path.dirname(filepat) - self.add_data_files((d, files)) - return - - paths = self.paths(filepat, include_non_existing=False) - if is_glob_pattern(filepat): - if is_glob_pattern(d): - pattern_list = d.split(os.sep) - pattern_list.reverse() - for path in paths: - path_list = path.split(os.sep) - path_list.reverse() - path_list.pop() # filename - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - target_list.append(path_list[i]) - i += 1 - else: - target_list.append(s) - target_list.reverse() - self.add_data_files((os.sep.join(target_list), path)) - else: - self.add_data_files((d, paths)) - return - assert not is_glob_pattern(d), repr((d, filepat)) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - data_files.append((os.path.join(self.path_in_package, d), paths)) - - ### XXX Implement add_py_modules - - def add_define_macros(self, macros): - """Add define macros to configuration - - Add the given sequence of macro name and value duples to the beginning - of the define_macros list This list will be visible to all extension - modules of the current package. - """ - dist = self.get_distribution() - if dist is not None: - if not hasattr(dist, 'define_macros'): - dist.define_macros = [] - dist.define_macros.extend(macros) - else: - self.define_macros.extend(macros) - - - def add_include_dirs(self,*paths): - """Add paths to configuration include directories. - - Add the given sequence of paths to the beginning of the include_dirs - list. This list will be visible to all extension modules of the - current package. - """ - include_dirs = self.paths(paths) - dist = self.get_distribution() - if dist is not None: - if dist.include_dirs is None: - dist.include_dirs = [] - dist.include_dirs.extend(include_dirs) - else: - self.include_dirs.extend(include_dirs) - - def add_headers(self,*files): - """Add installable headers to configuration. - - Add the given sequence of files to the beginning of the headers list. - By default, headers will be installed under // directory. If an item of files - is a tuple, then its first argument specifies the actual installation - location relative to the path. - - Parameters - ---------- - files : str or seq - Argument(s) can be either: - - * 2-sequence (,) - * path(s) to header file(s) where python includedir suffix will - default to package name. - """ - headers = [] - for path in files: - if is_string(path): - [headers.append((self.name, p)) for p in self.paths(path)] - else: - if not isinstance(path, (tuple, list)) or len(path) != 2: - raise TypeError(repr(path)) - [headers.append((path[0], p)) for p in self.paths(path[1])] - dist = self.get_distribution() - if dist is not None: - if dist.headers is None: - dist.headers = [] - dist.headers.extend(headers) - else: - self.headers.extend(headers) - - def paths(self,*paths,**kws): - """Apply glob to paths and prepend local_path if needed. - - Applies glob.glob(...) 
to each path in the sequence (if needed) and - prepends the local_path if needed. Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. - - """ - include_non_existing = kws.get('include_non_existing', True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self, kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources', 'depends', 'include_dirs', 'library_dirs', - 'module_dirs', 'extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Create and add an Extension instance to the ext_modules list. This - method also takes the following optional keyword arguments that are - passed on to the Extension constructor. - - Parameters - ---------- - name : str - name of the extension - sources : seq - list of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - include_dirs : - define_macros : - undef_macros : - library_dirs : - libraries : - runtime_library_dirs : - extra_objects : - extra_compile_args : - extra_link_args : - extra_f77_compile_args : - extra_f90_compile_args : - export_symbols : - swig_opts : - depends : - The depends list contains paths to files or directories that the - sources of the extension module depend on. If any path in the - depends list is newer than the extension module, then the module - will be rebuilt. - language : - f2py_options : - module_dirs : - extra_info : dict or list - dict or list of dict of keywords to be appended to keywords. - - Notes - ----- - The self.paths(...) method is applied to all lists that may contain - paths. 
- """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name, name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries', []) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname, tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname, lpath = libname.split('@', 1) - lpath = os.path.abspath(njoin(self.local_path, lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None, lpath, - caller_level = 2) - if isinstance(c, Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries', [])]: - llname = l.split('__OF__', 1)[0] - if llname == lname: - c.pop('name', None) - dict_append(ext_args,**c) - break - continue - libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - ext_args['define_macros'] = \ - self.define_macros + ext_args.get('define_macros', []) - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """ - Add library to configuration. - - Parameters - ---------- - name : str - Name of the extension. - sources : sequence - List of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - """ - self._add_library(name, sources, None, build_info) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def _add_library(self, name, sources, install_dir, build_info): - """Common implementation for add_library and add_installed_library. Do - not use directly""" - build_info = copy.copy(build_info) - build_info['sources'] = sources - - # Sometimes, depends is not set up to an empty list by default, and if - # depends is not given to add_library, distutils barfs (#1134) - if not 'depends' in build_info: - build_info['depends'] = [] - - self._fix_paths_dict(build_info) - - # Add to libraries list so that it is build with build_clib - self.libraries.append((name, build_info)) - - def add_installed_library(self, name, sources, install_dir, build_info=None): - """ - Similar to add_library, but the specified library is installed. 
- - Most C libraries used with ``distutils`` are only used to build python - extensions, but libraries built through this method will be installed - so that they can be reused by third-party packages. - - Parameters - ---------- - name : str - Name of the installed library. - sources : sequence - List of the library's source files. See `add_library` for details. - install_dir : str - Path to install the library, relative to the current sub-package. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - Returns - ------- - None - - See Also - -------- - add_library, add_npy_pkg_config, get_info - - Notes - ----- - The best way to encode the options required to link against the specified - C libraries is to use a "libname.ini" file, and use `get_info` to - retrieve the required options (see `add_npy_pkg_config` for more - information). - - """ - if not build_info: - build_info = {} - - install_dir = os.path.join(self.package_path, install_dir) - self._add_library(name, sources, install_dir, build_info) - self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) - - def add_npy_pkg_config(self, template, install_dir, subst_dict=None): - """ - Generate and install a npy-pkg config file from a template. - - The config file generated from `template` is installed in the - given install directory, using `subst_dict` for variable substitution. - - Parameters - ---------- - template : str - The path of the template, relatively to the current package path. - install_dir : str - Where to install the npy-pkg config file, relatively to the current - package path. - subst_dict : dict, optional - If given, any string of the form ``@key@`` will be replaced by - ``subst_dict[key]`` in the template file when installed. The install - prefix is always available through the variable ``@prefix@``, since the - install prefix is not easy to get reliably from setup.py. - - See also - -------- - add_installed_library, get_info - - Notes - ----- - This works for both standard installs and in-place builds, i.e. the - ``@prefix@`` refer to the source directory for in-place builds. - - Examples - -------- - :: - - config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) - - Assuming the foo.ini.in file has the following content:: - - [meta] - Name=@foo@ - Version=1.0 - Description=dummy description - - [default] - Cflags=-I@prefix@/include - Libs= - - The generated file will have the following content:: - - [meta] - Name=bar - Version=1.0 - Description=dummy description - - [default] - Cflags=-Iprefix_dir/include - Libs= - - and will be installed as foo.ini in the 'lib' subpath. - - When cross-compiling with numpy distutils, it might be necessary to - use modified npy-pkg-config files. Using the default/generated files - will link with the host libraries (i.e. libnpymath.a). For - cross-compilation you of-course need to link with target libraries, - while using the host Python installation. - - You can copy out the numpy/_core/lib/npy-pkg-config directory, add a - pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment - variable to point to the directory with the modified npy-pkg-config - files. 
- - Example npymath.ini modified for cross-compilation:: - - [meta] - Name=npymath - Description=Portable, core math library implementing C99 standard - Version=0.1 - - [variables] - pkgname=numpy._core - pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/_core - prefix=${pkgdir} - libdir=${prefix}/lib - includedir=${prefix}/include - - [default] - Libs=-L${libdir} -lnpymath - Cflags=-I${includedir} - Requires=mlib - - [msvc] - Libs=/LIBPATH:${libdir} npymath.lib - Cflags=/INCLUDE:${includedir} - Requires=mlib - - """ - if subst_dict is None: - subst_dict = {} - template = os.path.join(self.package_path, template) - - if self.name in self.installed_pkg_config: - self.installed_pkg_config[self.name].append((template, install_dir, - subst_dict)) - else: - self.installed_pkg_config[self.name] = [(template, install_dir, - subst_dict)] - - - def add_scripts(self,*files): - """Add scripts to configuration. - - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the /bin/ directory. - - """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - if dist.scripts is None: - dist.scripts = [] - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self, key) - a.extend(dict.get(key, [])) - for key in self.dict_keys: - a = getattr(self, key) - a.update(dict.get(key, {})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key, dict[key], dict.get('name', '?'))) - setattr(self, key, dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self, key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError("Don't know about key=%r" % (key)) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self, k, None) - if a: - s += '%s = %s\n' % (k, pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - """ - Returns the numpy.distutils config command instance. - """ - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.', old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - """ - Return a path to a temporary directory where temporary files should be - placed. - """ - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 77 compiler is available (because a simple Fortran 77 - code was able to be compiled successfully). - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. 
-
-        Use it inside source generating function to ensure that
-        setup distribution instance has been initialized.
-
-        Notes
-        -----
-        True if a Fortran 90 compiler is available (because a simple Fortran
-        90 code was able to be compiled successfully)
-        """
-        simple_fortran_subroutine = '''
-        subroutine simple
-        end
-        '''
-        config_cmd = self.get_config_cmd()
-        flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
-        return flag
-
-    def append_to(self, extlib):
-        """Append libraries, include_dirs to extension or library item.
-        """
-        if is_sequence(extlib):
-            lib_name, build_info = extlib
-            dict_append(build_info,
-                        libraries=self.libraries,
-                        include_dirs=self.include_dirs)
-        else:
-            from numpy.distutils.core import Extension
-            assert isinstance(extlib, Extension), repr(extlib)
-            extlib.libraries.extend(self.libraries)
-            extlib.include_dirs.extend(self.include_dirs)
-
-    def _get_svn_revision(self, path):
-        """Return path's SVN revision number.
-        """
-        try:
-            output = subprocess.check_output(['svnversion'], cwd=path)
-        except (subprocess.CalledProcessError, OSError):
-            pass
-        else:
-            m = re.match(rb'(?P<revision>\d+)', output)
-            if m:
-                return int(m.group('revision'))
-
-        if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
-            entries = njoin(path, '_svn', 'entries')
-        else:
-            entries = njoin(path, '.svn', 'entries')
-        if os.path.isfile(entries):
-            with open(entries) as f:
-                fstr = f.read()
-            if fstr[:5] == '<?xml':  # pre 1.4
-                m = re.search(r'revision="(?P<revision>\d+)"', fstr)
-                if m:
-                    return int(m.group('revision'))
-            else:  # non-xml entries file --- check to be sure that
-                m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
-                if m:
-                    return int(m.group('revision'))
-        return None
-
-    def _get_hg_revision(self, path):
-        """Return path's Mercurial revision number.
-        """
-        try:
-            output = subprocess.check_output(
-                ['hg', 'identify', '--num'], cwd=path)
-        except (subprocess.CalledProcessError, OSError):
-            pass
-        else:
-            m = re.match(rb'(?P<revision>\d+)', output)
-            if m:
-                return int(m.group('revision'))
-
-        branch_fn = njoin(path, '.hg', 'branch')
-        branch_cache_fn = njoin(path, '.hg', 'branch.cache')
-
-        if os.path.isfile(branch_fn):
-            branch0 = None
-            with open(branch_fn) as f:
-                revision0 = f.read().strip()
-
-            branch_map = {}
-            with open(branch_cache_fn) as f:
-                for line in f:
-                    branch1, revision1 = line.split()[:2]
-                    if revision1==revision0:
-                        branch0 = branch1
-                    try:
-                        revision1 = int(revision1)
-                    except ValueError:
-                        continue
-                    branch_map[branch1] = revision1
-
-            return branch_map.get(branch0)
-
-        return None
-
-
-    def get_version(self, version_file=None, version_variable=None):
-        """Try to get version string of a package.
-
-        Return a version string of the current package or None if the version
-        information could not be detected.
-
-        Notes
-        -----
-        This method scans files named
-        __version__.py, <packagename>_version.py, version.py, and
-        __svn_version__.py for string variables version, __version__, and
-        <packagename>_version, until a version number is found.
-        """
-        version = getattr(self, 'version', None)
-        if version is not None:
-            return version
-
-        # Get version from version file.
- if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py', - '__hg_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path, f) - if os.path.isfile(fn): - info = ('.py', 'U', 1) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name, name) - try: - version_module = exec_mod_from_location( - '_'.join(n.split('.')), fn) - except ImportError as e: - self.warn(str(e)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module, a, None) - if version is not None: - break - - # Try if versioneer module - try: - version = version_module.get_versions()['version'] - except AttributeError: - pass - - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN or Mercurial revision number - revision = self._get_svn_revision(self.local_path) - if revision is None: - revision = self._get_hg_revision(self.local_path) - - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __svn_version__.py file to the current package directory. - - Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __svn_version__.py existed before, nothing is done. - - This is - intended for working with source directories that are in an SVN - repository. - """ - target = njoin(self.local_path, '__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_hg_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __hg_version__.py file to the current package directory. - - Generate package __hg_version__.py file from Mercurial revision, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __hg_version__.py existed before, nothing is done. - - This is intended for working with source directories that are - in an Mercurial repository. 
- """ - target = njoin(self.local_path, '__hg_version__.py') - revision = self._get_hg_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_hg_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_hg_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - - This file is installed to the - package installation directory. - - """ - self.py_modules.append((self.name, name, generate_config_py)) - - def get_info(self,*names): - """Get resources information. - - Return information (from system_info.get_info) for all of the names in - the argument list in a single dictionary. - """ - from .system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/_core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/_core/setup.py - return include_dirs - -def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory. - - If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that - is returned. Otherwise, a path inside the location of the numpy module is - returned. - - The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining - customized npy-pkg-config .ini files for the cross-compilation - environment, and using them when cross-compiling. - - """ - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d is not None: - return d - spec = importlib.util.find_spec('numpy') - d = os.path.join(os.path.dirname(spec.origin), - '_core', 'lib', 'npy-pkg-config') - return d - -def get_pkg_info(pkgname, dirs=None): - """ - Return library info for the given package. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
- - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_info - - """ - from numpy.distutils.npy_pkg_config import read_config - - if dirs: - dirs.append(get_npy_pkg_dir()) - else: - dirs = [get_npy_pkg_dir()] - return read_config(pkgname, dirs) - -def get_info(pkgname, dirs=None): - """ - Return an info dict for a given C library. - - The info dict contains the necessary options to use the C library. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - info : dict - The dictionary with build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_pkg_info - - Examples - -------- - To get the necessary information for the npymath library from NumPy: - - >>> npymath_info = np.distutils.misc_util.get_info('npymath') - >>> npymath_info #doctest: +SKIP - {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': - ['.../numpy/_core/lib'], 'include_dirs': ['.../numpy/_core/include']} - - This info dict can then be used as input to a `Configuration` instance:: - - config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) - - """ - from numpy.distutils.npy_pkg_config import parse_flags - pkg_info = get_pkg_info(pkgname, dirs) - - # Translate LibraryInfo instance into a build_info dict - info = parse_flags(pkg_info.cflags()) - for k, v in parse_flags(pkg_info.libs()).items(): - info[k].extend(v) - - # add_extension extra_info argument is ANAL - info['define_macros'] = info['macros'] - del info['macros'] - del info['ignored'] - - return info - -def is_bootstrapping(): - import builtins - - try: - builtins.__NUMPY_SETUP__ - return True - except AttributeError: - return False - - -######################### - -def default_config_dict(name = None, parent_name = None, local_path=None): - """Return a configuration dictionary for usage in - configuration() function defined in file setup_.py. 
- """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - ), stacklevel=2) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov, str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. - - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - with open(target, 'w') as f: - f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - - # For gfortran+msvc combination, extra shared libraries may exist - f.write(textwrap.dedent(""" - import os - import sys - - extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - - if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - os.add_dll_directory(extra_dll_dir) - - """)) - - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(textwrap.dedent(r''' - def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - - def show(): - """ - Show libraries in the system on which NumPy was built. - - Print information about various resources (libraries, library - directories, include directories, etc.) in the system on which - NumPy was built. - - See Also - -------- - get_include : Returns the directory containing NumPy C - header files. - - Notes - ----- - 1. Classes specifying the information to be printed are defined - in the `numpy.distutils.system_info` module. - - Information may include: - - * ``language``: language used to write the libraries (mostly - C or f77) - * ``libraries``: names of libraries found in the system - * ``library_dirs``: directories containing the libraries - * ``include_dirs``: directories containing library header files - * ``src_dirs``: directories containing library source files - * ``define_macros``: preprocessor macros used by - ``distutils.setup`` - * ``baseline``: minimum CPU features required - * ``found``: dispatched features supported in the system - * ``not found``: dispatched features that are not supported - in the system - - 2. 
NumPy BLAS/LAPACK Installation Notes - - Installing a numpy wheel (``pip install numpy`` or force it - via ``pip install numpy --only-binary :numpy: numpy``) includes - an OpenBLAS implementation of the BLAS and LAPACK linear algebra - APIs. In this case, ``library_dirs`` reports the original build - time configuration as compiled with gcc/gfortran; at run time - the OpenBLAS library is in - ``site-packages/numpy.libs/`` (linux), or - ``site-packages/numpy/.dylibs/`` (macOS), or - ``site-packages/numpy/.libs/`` (windows). - - Installing numpy from source - (``pip install numpy --no-binary numpy``) searches for BLAS and - LAPACK dynamic link libraries at build time as influenced by - environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and - NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER; - or the optional file ``~/.numpy-site.cfg``. - NumPy remembers those locations and expects to load the same - libraries at run-time. - In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS - library) is in the default build-time search order after - 'openblas'. - - Examples - -------- - >>> import numpy as np - >>> np.show_config() - blas_opt_info: - language = c - define_macros = [('HAVE_CBLAS', None)] - libraries = ['openblas', 'openblas'] - library_dirs = ['/usr/local/lib'] - """ - from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ - ) - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - - features_found, features_not_found = [], [] - for feature in __cpu_dispatch__: - if __cpu_features__[feature]: - features_found.append(feature) - else: - features_not_found.append(feature) - - print("Supported SIMD extensions in this NumPy install:") - print(" baseline = %s" % (','.join(__cpu_baseline__))) - print(" found = %s" % (','.join(features_found))) - print(" not found = %s" % (','.join(features_not_found))) - - ''')) - - return target - -def msvc_version(compiler): - """Return version major and minor of compiler instance if it is - MSVC, raise an exception otherwise.""" - if not compiler.compiler_type == "msvc": - raise ValueError("Compiler instance is not msvc (%s)"\ - % compiler.compiler_type) - return compiler._MSVCCompiler__version - -def get_build_architecture(): - # Importing distutils.msvccompiler triggers a warning on non-Windows - # systems, so delay the import to here. - from distutils.msvccompiler import get_build_architecture - return get_build_architecture() - - -_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'} - - -def sanitize_cxx_flags(cxxflags): - ''' - Some flags are valid for C but not C++. Prune them. - ''' - return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] - - -def exec_mod_from_location(modname, modfile): - ''' - Use importlib machinery to import a module `modname` from the file - `modfile`. Depending on the `spec.loader`, the module may not be - registered in sys.modules. 
- ''' - spec = importlib.util.spec_from_file_location(modname, modfile) - foo = importlib.util.module_from_spec(spec) - spec.loader.exec_module(foo) - return foo diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py deleted file mode 100644 index 68239495d6c7..000000000000 --- a/numpy/distutils/msvc9compiler.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if not old: - return new - if new in old: - return old - - # Neither new nor old is empty. Give old priority. - return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self, plat_name=None): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib') - environ_include = os.getenv('include') - _MSVCCompiler.initialize(self, plat_name) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): - ld_args.append('/MANIFEST') - _MSVCCompiler.manifest_setup_ldargs(self, output_filename, - build_temp, ld_args) diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py deleted file mode 100644 index 2b93221baac8..000000000000 --- a/numpy/distutils/msvccompiler.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if new in old: - return old - if not old: - return new - - # Neither new nor old is empty. Give old priority. 
- return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib', '') - environ_include = os.getenv('include', '') - _MSVCCompiler.initialize(self) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - -def lib_opts_if_msvc(build_cmd): - """ Add flags if we are using MSVC compiler - - We can't see `build_cmd` in our scope, because we have not initialized - the distutils build command, so use this deferred calculation to run - when we are building the library. - """ - if build_cmd.compiler.compiler_type != 'msvc': - return [] - # Explicitly disable whole-program optimization. - flags = ['/GL-'] - # Disable voltbl section for vc142 to allow link using mingw-w64; see: - # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171 - if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']): - flags.append('-d2VolatileMetadata-') - return flags diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py deleted file mode 100644 index 14e8791b14cd..000000000000 --- a/numpy/distutils/npy_pkg_config.py +++ /dev/null @@ -1,441 +0,0 @@ -import sys -import re -import os - -from configparser import RawConfigParser - -__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', - 'read_config', 'parse_flags'] - -_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') - -class FormatError(OSError): - """ - Exception thrown when there is a problem parsing a configuration file. - - """ - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -class PkgNotFound(OSError): - """Exception raised when a package can not be located.""" - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -def parse_flags(line): - """ - Parse a line from a config file containing compile flags. - - Parameters - ---------- - line : str - A single line containing one or more compile flags. - - Returns - ------- - d : dict - Dictionary of parsed flags, split into relevant categories. - These categories are the keys of `d`: - - * 'include_dirs' - * 'library_dirs' - * 'libraries' - * 'macros' - * 'ignored' - - """ - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - - flags = (' ' + line).split(' -') - for flag in flags: - flag = '-' + flag - if len(flag) > 0: - if flag.startswith('-I'): - d['include_dirs'].append(flag[2:].strip()) - elif flag.startswith('-L'): - d['library_dirs'].append(flag[2:].strip()) - elif flag.startswith('-l'): - d['libraries'].append(flag[2:].strip()) - elif flag.startswith('-D'): - d['macros'].append(flag[2:].strip()) - else: - d['ignored'].append(flag) - - return d - -def _escape_backslash(val): - return val.replace('\\', '\\\\') - -class LibraryInfo: - """ - Object containing build information about a library. - - Parameters - ---------- - name : str - The library name. - description : str - Description of the library. 
- version : str - Version string. - sections : dict - The sections of the configuration file for the library. The keys are - the section headers, the values the text under each header. - vars : class instance - A `VariableSet` instance, which contains ``(name, value)`` pairs for - variables defined in the configuration file for the library. - requires : sequence, optional - The required libraries for the library to be installed. - - Notes - ----- - All input parameters (except "sections" which is a method) are available as - attributes of the same name. - - """ - def __init__(self, name, description, version, sections, vars, requires=None): - self.name = name - self.description = description - if requires: - self.requires = requires - else: - self.requires = [] - self.version = version - self._sections = sections - self.vars = vars - - def sections(self): - """ - Return the section headers of the config file. - - Parameters - ---------- - None - - Returns - ------- - keys : list of str - The list of section headers. - - """ - return list(self._sections.keys()) - - def cflags(self, section="default"): - val = self.vars.interpolate(self._sections[section]['cflags']) - return _escape_backslash(val) - - def libs(self, section="default"): - val = self.vars.interpolate(self._sections[section]['libs']) - return _escape_backslash(val) - - def __str__(self): - m = ['Name: %s' % self.name, 'Description: %s' % self.description] - if self.requires: - m.append('Requires:') - else: - m.append('Requires: %s' % ",".join(self.requires)) - m.append('Version: %s' % self.version) - - return "\n".join(m) - -class VariableSet: - """ - Container object for the variables defined in a config file. - - `VariableSet` can be used as a plain dictionary, with the variable names - as keys. - - Parameters - ---------- - d : dict - Dict of items in the "variables" section of the configuration file. - - """ - def __init__(self, d): - self._raw_data = dict([(k, v) for k, v in d.items()]) - - self._re = {} - self._re_sub = {} - - self._init_parse() - - def _init_parse(self): - for k, v in self._raw_data.items(): - self._init_parse_var(k, v) - - def _init_parse_var(self, name, value): - self._re[name] = re.compile(r'\$\{%s\}' % name) - self._re_sub[name] = value - - def interpolate(self, value): - # Brute force: we keep interpolating until there is no '${var}' anymore - # or until interpolated string is equal to input string - def _interpolate(value): - for k in self._re.keys(): - value = self._re[k].sub(self._re_sub[k], value) - return value - while _VAR.search(value): - nvalue = _interpolate(value) - if nvalue == value: - break - value = nvalue - - return value - - def variables(self): - """ - Return the list of variable names. - - Parameters - ---------- - None - - Returns - ------- - names : list of str - The names of all variables in the `VariableSet` instance. 
- - """ - return list(self._raw_data.keys()) - - # Emulate a dict to set/get variables values - def __getitem__(self, name): - return self._raw_data[name] - - def __setitem__(self, name, value): - self._raw_data[name] = value - self._init_parse_var(name, value) - -def parse_meta(config): - if not config.has_section('meta'): - raise FormatError("No meta section found !") - - d = dict(config.items('meta')) - - for k in ['name', 'description', 'version']: - if not k in d: - raise FormatError("Option %s (section [meta]) is mandatory, " - "but not found" % k) - - if not 'requires' in d: - d['requires'] = [] - - return d - -def parse_variables(config): - if not config.has_section('variables'): - raise FormatError("No variables section found !") - - d = {} - - for name, value in config.items("variables"): - d[name] = value - - return VariableSet(d) - -def parse_sections(config): - return meta_d, r - -def pkg_to_filename(pkg_name): - return "%s.ini" % pkg_name - -def parse_config(filename, dirs=None): - if dirs: - filenames = [os.path.join(d, filename) for d in dirs] - else: - filenames = [filename] - - config = RawConfigParser() - - n = config.read(filenames) - if not len(n) >= 1: - raise PkgNotFound("Could not find file(s) %s" % str(filenames)) - - # Parse meta and variables sections - meta = parse_meta(config) - - vars = {} - if config.has_section('variables'): - for name, value in config.items("variables"): - vars[name] = _escape_backslash(value) - - # Parse "normal" sections - secs = [s for s in config.sections() if not s in ['meta', 'variables']] - sections = {} - - requires = {} - for s in secs: - d = {} - if config.has_option(s, "requires"): - requires[s] = config.get(s, 'requires') - - for name, value in config.items(s): - d[name] = value - sections[s] = d - - return meta, vars, sections, requires - -def _read_config_imp(filenames, dirs=None): - def _read_config(f): - meta, vars, sections, reqs = parse_config(f, dirs) - # recursively add sections and variables of required libraries - for rname, rvalue in reqs.items(): - nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) - - # Update var dict for variables not in 'top' config file - for k, v in nvars.items(): - if not k in vars: - vars[k] = v - - # Update sec dict - for oname, ovalue in nsections[rname].items(): - if ovalue: - sections[rname][oname] += ' %s' % ovalue - - return meta, vars, sections, reqs - - meta, vars, sections, reqs = _read_config(filenames) - - # FIXME: document this. If pkgname is defined in the variables section, and - # there is no pkgdir variable defined, pkgdir is automatically defined to - # the path of pkgname. This requires the package to be imported to work - if not 'pkgdir' in vars and "pkgname" in vars: - pkgname = vars["pkgname"] - if not pkgname in sys.modules: - raise ValueError("You should import %s to get information on %s" % - (pkgname, meta["name"])) - - mod = sys.modules[pkgname] - vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) - - return LibraryInfo(name=meta["name"], description=meta["description"], - version=meta["version"], sections=sections, vars=VariableSet(vars)) - -# Trivial cache to cache LibraryInfo instances creation. To be really -# efficient, the cache should be handled in read_config, since a same file can -# be parsed many time outside LibraryInfo creation, but I doubt this will be a -# problem in practice -_CACHE = {} -def read_config(pkgname, dirs=None): - """ - Return library info for a package from its configuration file. 
-
- Parameters
- ----------
- pkgname : str
- Name of the package (should match the name of the .ini file, without
- the extension, e.g. foo for the file foo.ini).
- dirs : sequence, optional
- If given, should be a sequence of directories - usually including
- the NumPy base directory - where to look for npy-pkg-config files.
-
- Returns
- -------
- pkginfo : class instance
- The `LibraryInfo` instance containing the build information.
-
- Raises
- ------
- PkgNotFound
- If the package is not found.
-
- See Also
- --------
- misc_util.get_info, misc_util.get_pkg_info
-
- Examples
- --------
- >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
- >>> type(npymath_info)
- <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
- >>> print(npymath_info)
- Name: npymath
- Description: Portable, core math library implementing C99 standard
- Requires:
- Version: 0.1 #random
-
- """
- try:
- return _CACHE[pkgname]
- except KeyError:
- v = _read_config_imp(pkg_to_filename(pkgname), dirs)
- _CACHE[pkgname] = v
- return v
-
-# TODO:
-# - implements version comparison (modversion + atleast)
-
-# pkg-config simple emulator - useful for debugging, and maybe later to query
-# the system
-if __name__ == '__main__':
- from optparse import OptionParser
- import glob
-
- parser = OptionParser()
- parser.add_option("--cflags", dest="cflags", action="store_true",
- help="output all preprocessor and compiler flags")
- parser.add_option("--libs", dest="libs", action="store_true",
- help="output all linker flags")
- parser.add_option("--use-section", dest="section",
- help="use this section instead of default for options")
- parser.add_option("--version", dest="version", action="store_true",
- help="output version")
- parser.add_option("--atleast-version", dest="min_version",
- help="Minimal version")
- parser.add_option("--list-all", dest="list_all", action="store_true",
- help="Minimal version")
- parser.add_option("--define-variable", dest="define_variable",
- help="Replace variable with the given value")
-
- (options, args) = parser.parse_args(sys.argv)
-
- if len(args) < 2:
- raise ValueError("Expect package name on the command line:")
-
- if options.list_all:
- files = glob.glob("*.ini")
- for f in files:
- info = read_config(f)
- print("%s\t%s - %s" % (info.name, info.name, info.description))
-
- pkg_name = args[1]
- d = os.environ.get('NPY_PKG_CONFIG_PATH')
- if d:
- info = read_config(
- pkg_name, ['numpy/_core/lib/npy-pkg-config', '.', d]
- )
- else:
- info = read_config(
- pkg_name, ['numpy/_core/lib/npy-pkg-config', '.']
- )
-
- if options.section:
- section = options.section
- else:
- section = "default"
-
- if options.define_variable:
- m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
- if not m:
- raise ValueError("--define-variable option should be of "
- "the form --define-variable=foo=bar")
- else:
- name = m.group(1)
- value = m.group(2)
- info.vars[name] = value
-
- if options.cflags:
- print(info.cflags(section))
- if options.libs:
- print(info.libs(section))
- if options.version:
- print(info.version)
- if options.min_version:
- print(info.version >= options.min_version)
diff --git a/numpy/distutils/numpy_distribution.py b/numpy/distutils/numpy_distribution.py
deleted file mode 100644
index ea8182659cb1..000000000000
--- a/numpy/distutils/numpy_distribution.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# XXX: Handle setuptools ?
-from distutils.core import Distribution
-
-# This class is used because we add new files (sconscripts, and so on) with the
-# scons command
-class NumpyDistribution(Distribution):
- def __init__(self, attrs = None):
- # A list of (sconscripts, pre_hook, post_hook, src, parent_names)
- self.scons_data = []
- # A list of installable libraries
- self.installed_libraries = []
- # A dict of pkg_config files to generate/install
- self.installed_pkg_config = {}
- Distribution.__init__(self, attrs)
-
- def has_scons_scripts(self):
- return bool(self.scons_data)
diff --git a/numpy/distutils/pathccompiler.py b/numpy/distutils/pathccompiler.py
deleted file mode 100644
index 48051810ee21..000000000000
--- a/numpy/distutils/pathccompiler.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from distutils.unixccompiler import UnixCCompiler
-
-class PathScaleCCompiler(UnixCCompiler):
-
- """
- PathScale compiler compatible with a gcc-built Python.
- """
-
- compiler_type = 'pathcc'
- cc_exe = 'pathcc'
- cxx_exe = 'pathCC'
-
- def __init__ (self, verbose=0, dry_run=0, force=0):
- UnixCCompiler.__init__ (self, verbose, dry_run, force)
- cc_compiler = self.cc_exe
- cxx_compiler = self.cxx_exe
- self.set_executables(compiler=cc_compiler,
- compiler_so=cc_compiler,
- compiler_cxx=cxx_compiler,
- linker_exe=cc_compiler,
- linker_so=cc_compiler + ' -shared')
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
deleted file mode 100644
index e428b47f08d4..000000000000
--- a/numpy/distutils/system_info.py
+++ /dev/null
@@ -1,3267 +0,0 @@
-"""
-This file defines a set of system_info classes for getting
-information about various resources (libraries, library directories,
-include directories, etc.) in the system. Usage:
- info_dict = get_info(<name>)
- where <name> is a string 'atlas','x11','fftw','lapack','blas',
- 'lapack_src', 'blas_src', etc. For a complete list of allowed names,
- see the definition of get_info() function below.
-
- Returned info_dict is a dictionary which is compatible with
- distutils.setup keyword arguments. If info_dict == {}, then the
- asked resource is not available (system_info could not find it).
-
- Several *_info classes specify an environment variable to specify
- the locations of software. When setting the corresponding environment
- variable to 'None' then the software will be ignored, even when it
- is available in system.
-
-Global parameters:
- system_info.search_static_first - search static libraries (.a)
- in precedence to shared ones (.so, .sl) if enabled.
- system_info.verbosity - output the results to stdout if enabled.
-
-The file 'site.cfg' is looked for in
-
-1) Directory of main setup.py file being run.
-2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
-3) System wide directory (location of this file...)
-
-The first one found is used to get system configuration options. The
-format is that used by ConfigParser (i.e., Windows .INI style). The
-section ALL is not intended for general use.
-
-Appropriate defaults are used if nothing is specified.
-
-The order of finding the locations of resources is the following:
- 1. environment variable
- 2. section in site.cfg
- 3. DEFAULT section in site.cfg
- 4. System default search paths (see ``default_*`` variables below).
-Only the first complete match is returned.
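For orientation while reading the removed module, the lookup described in this docstring was driven from Python roughly as below. A minimal usage sketch, not taken from the diff: it assumes a NumPy version that still ships numpy.distutils, and 'lapack_opt' is simply one of the recommended section names listed next.

    # Hedged sketch: querying the system_info database (pre-removal NumPy).
    from numpy.distutils.system_info import get_info

    info = get_info('lapack_opt')        # empty dict if nothing was found
    print(info.get('libraries', []))     # e.g. ['openblas']
    print(info.get('library_dirs', []))  # e.g. ['/usr/local/lib']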
- -Currently, the following classes are available, along with their section names: - - Numeric_info:Numeric - _numpy_info:Numeric - _pkg_config_info:None - accelerate_info:accelerate - accelerate_lapack_info:accelerate - agg2_info:agg2 - amd_info:amd - atlas_3_10_blas_info:atlas - atlas_3_10_blas_threads_info:atlas - atlas_3_10_info:atlas - atlas_3_10_threads_info:atlas - atlas_blas_info:atlas - atlas_blas_threads_info:atlas - atlas_info:atlas - atlas_threads_info:atlas - blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix) - blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS) - blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix) - blas_info:blas - blas_mkl_info:mkl - blas_ssl2_info:ssl2 - blas_opt_info:ALL # usage recommended - blas_src_info:blas_src - blis_info:blis - boost_python_info:boost_python - dfftw_info:fftw - dfftw_threads_info:fftw - djbfft_info:djbfft - f2py_info:ALL - fft_opt_info:ALL - fftw2_info:fftw - fftw3_info:fftw3 - fftw_info:fftw - fftw_threads_info:fftw - flame_info:flame - freetype2_info:freetype2 - gdk_2_info:gdk_2 - gdk_info:gdk - gdk_pixbuf_2_info:gdk_pixbuf_2 - gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2 - gdk_x11_2_info:gdk_x11_2 - gtkp_2_info:gtkp_2 - gtkp_x11_2_info:gtkp_x11_2 - lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) - lapack_atlas_3_10_info:atlas - lapack_atlas_3_10_threads_info:atlas - lapack_atlas_info:atlas - lapack_atlas_threads_info:atlas - lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK) - lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix) - lapack_info:lapack - lapack_mkl_info:mkl - lapack_ssl2_info:ssl2 - lapack_opt_info:ALL # usage recommended - lapack_src_info:lapack_src - mkl_info:mkl - ssl2_info:ssl2 - numarray_info:numarray - numerix_info:numerix - numpy_info:numpy - openblas64__info:openblas64_ - openblas64__lapack_info:openblas64_ - openblas_clapack_info:openblas - openblas_ilp64_info:openblas_ilp64 - openblas_ilp64_lapack_info:openblas_ilp64 - openblas_info:openblas - openblas_lapack_info:openblas - sfftw_info:fftw - sfftw_threads_info:fftw - system_info:ALL - umfpack_info:umfpack - wx_info:wx - x11_info:x11 - xft_info:xft - -Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER -and NPY_LAPACK_ORDER environment variables to determine the order in which -specific BLAS and LAPACK libraries are searched for. - -This search (or autodetection) can be bypassed by defining the environment -variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the -exact linker flags to use (language will be set to F77). Building against -Netlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK -implementations at runtime. If using this to build NumPy itself, it is -recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a -CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized -otherwise). 
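To make the bypass concrete, here is a hedged sketch of the environment one might prepare before a numpy.distutils-based build; the specific linker flags are assumptions for a system with Netlib reference libraries on the default search path, not values taken from this module.

    # Hedged sketch: skip BLAS/LAPACK autodetection (pre-removal NumPy builds).
    import os

    os.environ['NPY_BLAS_LIBS'] = '-lblas'      # exact linker flags, used as-is
    os.environ['NPY_CBLAS_LIBS'] = '-lcblas'    # optional: enables CBLAS matmul
    os.environ['NPY_LAPACK_LIBS'] = '-llapack'
    # ...then run the setup.py-based build from this process or a child process.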
-
-Example:
-----------
-[DEFAULT]
-# default section
-library_dirs = /usr/lib:/usr/local/lib:/opt/lib
-include_dirs = /usr/include:/usr/local/include:/opt/include
-src_dirs = /usr/local/src:/opt/src
-# search static libraries (.a) in preference to shared ones (.so)
-search_static_first = 0
-
-[fftw]
-libraries = rfftw, fftw
-
-[atlas]
-library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
-# for overriding the names of the atlas libraries
-libraries = lapack, f77blas, cblas, atlas
-
-[x11]
-library_dirs = /usr/X11R6/lib
-include_dirs = /usr/X11R6/include
-----------
-
-Note that the ``libraries`` key is the default setting for libraries.
-
-Authors:
- Pearu Peterson <pearu@cens.ioc.ee>, February 2002
- David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
-
-Copyright 2002 Pearu Peterson all rights reserved,
-Pearu Peterson <pearu@cens.ioc.ee>
-Permission to use, modify, and distribute this software is given under the
-terms of the NumPy (BSD style) license. See LICENSE.txt that came with
-this distribution for specifics.
-
-NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
-
-"""
-import sys
-import os
-import re
-import copy
-import warnings
-import subprocess
-import textwrap
-
-from glob import glob
-from functools import reduce
-from configparser import NoOptionError
-from configparser import RawConfigParser as ConfigParser
-# It seems that some people are importing ConfigParser from here so is
-# good to keep its class name. Use of RawConfigParser is needed in
-# order to be able to load path names with percent in them, like
-# `feature%2Fcool` which is common on git flow branch names.
-
-from distutils.errors import DistutilsError
-from distutils.dist import Distribution
-import sysconfig
-from numpy.distutils import log
-from distutils.util import get_platform
-
-from numpy.distutils.exec_command import (
- find_executable, filepath_from_subprocess_output,
- )
-from numpy.distutils.misc_util import (is_sequence, is_string,
- get_shared_lib_extension)
-from numpy.distutils.command.config import config as cmd_config
-from numpy.distutils import customized_ccompiler as _customized_ccompiler
-from numpy.distutils import _shell_utils
-import distutils.ccompiler
-import tempfile
-import shutil
-
-__all__ = ['system_info']
-
-# Determine number of bits
-import platform
-_bits = {'32bit': 32, '64bit': 64}
-platform_bits = _bits[platform.architecture()[0]]
-
-
-global_compiler = None
-
-def customized_ccompiler():
- global global_compiler
- if not global_compiler:
- global_compiler = _customized_ccompiler()
- return global_compiler
-
-
-def _c_string_literal(s):
- """
- Convert a python string into a literal suitable for inclusion into C code
- """
- # only these three characters are forbidden in C strings
- s = s.replace('\\', r'\\')
- s = s.replace('"', r'\"')
- s = s.replace('\n', r'\n')
- return '"{}"'.format(s)
-
-
-def libpaths(paths, bits):
- """Return a list of library paths valid on 32 or 64 bit systems.
-
- Inputs:
- paths : sequence
- A sequence of strings (typically paths)
- bits : int
- An integer, the only valid values are 32 or 64. A ValueError exception
- is raised otherwise.
- - Examples: - - Consider a list of directories - >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - For a 32-bit platform, this is already valid: - >>> np.distutils.system_info.libpaths(paths,32) - ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] - - On 64 bits, we prepend the '64' postfix - >>> np.distutils.system_info.libpaths(paths,64) - ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', - '/usr/lib64', '/usr/lib'] - """ - if bits not in (32, 64): - raise ValueError("Invalid bit size in libpaths: 32 or 64 only") - - # Handle 32bit case - if bits == 32: - return paths - - # Handle 64bit case - out = [] - for p in paths: - out.extend([p + '64', p]) - - return out - - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(sysconfig.get_config_var('exec_prefix'), - 'libs')] - default_runtime_dirs = [] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] - _include_dirs = [ - 'include', - 'include/suitesparse', - ] - _lib_dirs = [ - 'lib', - ] - - _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] - _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] - def add_system_root(library_root): - """Add a package manager root to the include directories""" - global default_lib_dirs - global default_include_dirs - - library_root = os.path.normpath(library_root) - - default_lib_dirs.extend( - os.path.join(library_root, d) for d in _lib_dirs) - default_include_dirs.extend( - os.path.join(library_root, d) for d in _include_dirs) - - # VCpkg is the de-facto package manager on windows for C/C++ - # libraries. If it is on the PATH, then we append its paths here. - vcpkg = shutil.which('vcpkg') - if vcpkg: - vcpkg_dir = os.path.dirname(vcpkg) - if platform.architecture()[0] == '32bit': - specifier = 'x86' - else: - specifier = 'x64' - - vcpkg_installed = os.path.join(vcpkg_dir, 'installed') - for vcpkg_root in [ - os.path.join(vcpkg_installed, specifier + '-windows'), - os.path.join(vcpkg_installed, specifier + '-windows-static'), - ]: - add_system_root(vcpkg_root) - - # Conda is another popular package manager that provides libraries - conda = shutil.which('conda') - if conda: - conda_dir = os.path.dirname(conda) - add_system_root(os.path.join(conda_dir, '..', 'Library')) - add_system_root(os.path.join(conda_dir, 'Library')) - -else: - default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'], platform_bits) - default_runtime_dirs = [] - default_include_dirs = ['/usr/local/include', - '/opt/include', - # path of umfpack under macports - '/opt/local/include/ufsparse', - '/opt/local/include', '/sw/include', - '/usr/include/suitesparse'] - default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] - - default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', - '/usr/lib'], platform_bits) - default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] - - if os.path.exists('/usr/lib/X11'): - globbed_x11_dir = glob('/usr/lib/*/libX11.so') - if globbed_x11_dir: - x11_so_dir = os.path.split(globbed_x11_dir[0])[0] - default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) - default_x11_include_dirs.extend(['/usr/lib/X11/include', - '/usr/include/X11']) - - with open(os.devnull, 'w') as tmp: - try: - p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, - stderr=tmp) - except (OSError, DistutilsError): - # OSError if gcc is not installed, or SandboxViolation (DistutilsError - # subclass) if an old 
setuptools bug is triggered (see gh-3160). - pass - else: - triplet = str(p.communicate()[0].decode().strip()) - if p.returncode == 0: - # gcc supports the "-print-multiarch" option - default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] - default_lib_dirs += [os.path.join("/usr/lib/", triplet)] - - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) - default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] -default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] -default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] -default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] - -so_ext = get_shared_lib_extension() - - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.path.expanduser('~') - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - - -def _parse_env_order(base_order, env): - """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order` - - This method will sequence the environment variable and check for their - individual elements in `base_order`. - - The items in the environment variable may be negated via '^item' or '!itema,itemb'. - It must start with ^/! to negate all options. 
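A hedged illustration of the rules just described, with a hypothetical base order; the expected tuples follow the documented behavior rather than captured output:

    # Hedged sketch: _parse_env_order on a pre-removal NumPy.
    import os
    from numpy.distutils.system_info import _parse_env_order

    base = ['mkl', 'openblas', 'atlas']
    os.environ['NPY_BLAS_ORDER'] = 'openblas,foo'
    print(_parse_env_order(base, 'NPY_BLAS_ORDER'))
    # expected: (['openblas'], ['foo']), unknown names are reported separately

    os.environ['NPY_BLAS_ORDER'] = '^atlas'
    print(_parse_env_order(base, 'NPY_BLAS_ORDER'))
    # expected: (['mkl', 'openblas'], []), negation removes entries from base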
- - Raises - ------ - ValueError: for mixed negated and non-negated orders or multiple negated orders - - Parameters - ---------- - base_order : list of str - the base list of orders - env : str - the environment variable to be parsed, if none is found, `base_order` is returned - - Returns - ------- - allow_order : list of str - allowed orders in lower-case - unknown_order : list of str - for values not overlapping with `base_order` - """ - order_str = os.environ.get(env, None) - - # ensure all base-orders are lower-case (for easier comparison) - base_order = [order.lower() for order in base_order] - if order_str is None: - return base_order, [] - - neg = order_str.startswith(('^', '!')) - # Check format - order_str_l = list(order_str) - sum_neg = order_str_l.count('^') + order_str_l.count('!') - if neg: - if sum_neg > 1: - raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}") - # remove prefix - order_str = order_str[1:] - elif sum_neg > 0: - raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}") - - # Split and lower case - orders = order_str.lower().split(',') - - # to inform callee about non-overlapping elements - unknown_order = [] - - # if negated, we have to remove from the order - if neg: - allow_order = base_order.copy() - - for order in orders: - if not order: - continue - - if order not in base_order: - unknown_order.append(order) - continue - - if order in allow_order: - allow_order.remove(order) - - else: - allow_order = [] - - for order in orders: - if not order: - continue - - if order not in base_order: - unknown_order.append(order) - continue - - if order not in allow_order: - allow_order.append(order) - - return allow_order, unknown_order - - -def get_info(name, notfound_action=0): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'armpl': armpl_info, - 'blas_armpl': blas_armpl_info, - 'lapack_armpl': lapack_armpl_info, - 'fftw3_armpl': fftw3_armpl_info, - 'atlas': atlas_info, # use lapack_opt or blas_opt instead - 'atlas_threads': atlas_threads_info, # ditto - 'atlas_blas': atlas_blas_info, - 'atlas_blas_threads': atlas_blas_threads_info, - 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead - 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto - 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead - 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto - 'atlas_3_10_blas': atlas_3_10_blas_info, - 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, - 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead - 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto - 'flame': flame_info, # use lapack_opt instead - 'mkl': mkl_info, - 'ssl2': ssl2_info, - # openblas which may or may not have embedded lapack - 'openblas': openblas_info, # use blas_opt instead - # openblas with embedded lapack - 'openblas_lapack': openblas_lapack_info, # use blas_opt instead - 'openblas_clapack': openblas_clapack_info, # use blas_opt instead - 'blis': blis_info, # use blas_opt instead - 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead - 'blas_mkl': blas_mkl_info, # use blas_opt instead - 'lapack_ssl2': lapack_ssl2_info, - 'blas_ssl2': blas_ssl2_info, - 'accelerate': accelerate_info, # use blas_opt instead - 'accelerate_lapack': accelerate_lapack_info, - 'openblas64_': openblas64__info, - 'openblas64__lapack': openblas64__lapack_info, - 'openblas_ilp64': 
openblas_ilp64_info, - 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, - 'x11': x11_info, - 'fft_opt': fft_opt_info, - 'fftw': fftw_info, - 'fftw2': fftw2_info, - 'fftw3': fftw3_info, - 'dfftw': dfftw_info, - 'sfftw': sfftw_info, - 'fftw_threads': fftw_threads_info, - 'dfftw_threads': dfftw_threads_info, - 'sfftw_threads': sfftw_threads_info, - 'djbfft': djbfft_info, - 'blas': blas_info, # use blas_opt instead - 'lapack': lapack_info, # use lapack_opt instead - 'lapack_src': lapack_src_info, - 'blas_src': blas_src_info, - 'numpy': numpy_info, - 'f2py': f2py_info, - 'Numeric': Numeric_info, - 'numeric': Numeric_info, - 'numarray': numarray_info, - 'numerix': numerix_info, - 'lapack_opt': lapack_opt_info, - 'lapack_ilp64_opt': lapack_ilp64_opt_info, - 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, - 'lapack64__opt': lapack64__opt_info, - 'blas_opt': blas_opt_info, - 'blas_ilp64_opt': blas_ilp64_opt_info, - 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, - 'blas64__opt': blas64__opt_info, - 'boost_python': boost_python_info, - 'agg2': agg2_info, - 'wx': wx_info, - 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, - 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, - 'gdk_pixbuf_2': gdk_pixbuf_2_info, - 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, - 'gdk': gdk_info, - 'gdk_2': gdk_2_info, - 'gdk-2.0': gdk_2_info, - 'gdk_x11_2': gdk_x11_2_info, - 'gdk-x11-2.0': gdk_x11_2_info, - 'gtkp_x11_2': gtkp_x11_2_info, - 'gtk+-x11-2.0': gtkp_x11_2_info, - 'gtkp_2': gtkp_2_info, - 'gtk+-2.0': gtkp_2_info, - 'xft': xft_info, - 'freetype2': freetype2_info, - 'umfpack': umfpack_info, - 'amd': amd_info, - }.get(name.lower(), system_info) - return cl().get_info(notfound_action) - - -class NotFoundError(DistutilsError): - """Some third-party program or library is not found.""" - - -class AliasedOptionError(DistutilsError): - """ - Aliases entries in config files should not be existing. - In section '{section}' we found multiple appearances of options {options}.""" - - -class AtlasNotFoundError(NotFoundError): - """ - Atlas (http://github.com/math-atlas/math-atlas) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [atlas]) or by setting - the ATLAS environment variable.""" - - -class FlameNotFoundError(NotFoundError): - """ - FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [flame]).""" - - -class LapackNotFoundError(NotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [lapack]) or by setting - the LAPACK environment variable.""" - - -class LapackSrcNotFoundError(LapackNotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [lapack_src]) or by setting - the LAPACK_SRC environment variable.""" - - -class LapackILP64NotFoundError(NotFoundError): - """ - 64-bit Lapack libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasOptNotFoundError(NotFoundError): - """ - Optimized (vendor) Blas libraries are not found. - Falls back to netlib Blas library which has worse performance. 
- A better performance should be easily gained by switching - Blas library.""" - -class BlasNotFoundError(NotFoundError): - """ - Blas (http://www.netlib.org/blas/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [blas]) or by setting - the BLAS environment variable.""" - -class BlasILP64NotFoundError(NotFoundError): - """ - 64-bit Blas libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasSrcNotFoundError(BlasNotFoundError): - """ - Blas (http://www.netlib.org/blas/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [blas_src]) or by setting - the BLAS_SRC environment variable.""" - - -class FFTWNotFoundError(NotFoundError): - """ - FFTW (http://www.fftw.org/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [fftw]) or by setting - the FFTW environment variable.""" - - -class DJBFFTNotFoundError(NotFoundError): - """ - DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [djbfft]) or by setting - the DJBFFT environment variable.""" - - -class NumericNotFoundError(NotFoundError): - """ - Numeric (https://www.numpy.org/) module not found. - Get it from above location, install it, and retry setup.py.""" - - -class X11NotFoundError(NotFoundError): - """X11 libraries not found.""" - - -class UmfpackNotFoundError(NotFoundError): - """ - UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) - not found. Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [umfpack]) or by setting - the UMFPACK environment variable.""" - - -class system_info: - - """ get_info() is the only public method. Don't use others. - """ - dir_env_var = None - # XXX: search_static_first is disabled by default, may disappear in - # future unless it is proved to be useful. - search_static_first = 0 - # The base-class section name is a random word "ALL" and is not really - # intended for general use. It cannot be None nor can it be DEFAULT as - # these break the ConfigParser. 
See gh-15338 - section = 'ALL' - saved_results = {} - - notfounderror = NotFoundError - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), - 'include_dirs': os.pathsep.join(default_include_dirs), - 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), - 'rpath': '', - 'src_dirs': os.pathsep.join(default_src_dirs), - 'search_static_first': str(self.search_static_first), - 'extra_compile_args': '', 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - - if self.section is not None: - self.search_static_first = self.cp.getboolean( - self.section, 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - if self.section is not None: - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - # The extensions use runtime_library_dirs - r_dirs = self.get_runtime_lib_dirs() - # Intrinsic distutils use rpath, we simply append both entries - # as though they were one entry - r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) - info = {} - for lib in libs: - i = self.check_libs(dirs, [lib]) - if i is not None: - dict_append(info, **i) - else: - log.info('Library %s was not found. Ignoring' % (lib)) - - if r_dirs: - i = self.check_libs(r_dirs, [lib]) - if i is not None: - # Swap library keywords found to runtime_library_dirs - # the libraries are insisting on the user having defined - # them using the library_dirs, and not necessarily by - # runtime_library_dirs - del i['libraries'] - i['runtime_library_dirs'] = i.pop('library_dirs') - dict_append(info, **i) - else: - log.info('Runtime library %s was not found. 
Ignoring' % (lib)) - - return info - - def set_info(self, **info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info, **lib_info) - # Update extra information - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - self.saved_results[self.__class__.__name__] = info - - def get_option_single(self, *options): - """ Ensure that only one of `options` are found in the section - - Parameters - ---------- - *options : list of str - a list of options to be found in the section (``self.section``) - - Returns - ------- - str : - the option that is uniquely found in the section - - Raises - ------ - AliasedOptionError : - in case more than one of the options are found - """ - found = [self.cp.has_option(self.section, opt) for opt in options] - if sum(found) == 1: - return options[found.index(True)] - elif sum(found) == 0: - # nothing is found anyways - return options[0] - - # Else we have more than 1 key found - if AliasedOptionError.__doc__ is None: - raise AliasedOptionError() - raise AliasedOptionError(AliasedOptionError.__doc__.format( - section=self.section, options='[{}]'.format(', '.join(options)))) - - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def calc_extra_info(self): - """ Updates the information in the current information with - respect to these flags: - extra_compile_args - extra_link_args - """ - info = {} - for key in ['extra_compile_args', 'extra_link_args']: - # Get values - opt = self.cp.get(self.section, key) - opt = _shell_utils.NativeParser.split(opt) - if opt: - tmp = {key: opt} - dict_append(info, **tmp) - return info - - def get_info(self, notfound_action=0): - """ Return a dictionary with items that are compatible - with numpy.distutils.setup keyword arguments. - """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action == 1: - warnings.warn(self.notfounderror.__doc__, stacklevel=2) - elif notfound_action == 2: - raise self.notfounderror(self.notfounderror.__doc__) - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if log.get_threshold() <= log.INFO and flag: - for k, v in res.items(): - v = str(v) - if k in ['sources', 'libraries'] and len(v) > 270: - v = v[:120] + '...\n...\n...' 
+ v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0] == e0: - log.info('Setting %s=%s' % (env_var[0], e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d == 'None': - log.info('Disabled %s: %s', - self.__class__.__name__, '(%s is None)' - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self, '_lib_names', []) - if len(l) == 1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3] == 'lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include', 'lib']: - d1 = os.path.join(d, dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get(self.section, key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if len(d) > 0 and not os.path.isdir(d): - warnings.warn('Specified path %s is invalid.' % d, stacklevel=2) - continue - - if d not in ret: - ret.append(d) - - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_runtime_lib_dirs(self, key='runtime_library_dirs'): - path = self.get_paths(self.section, key) - if path == ['']: - path = [] - return path - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - if hasattr(self, '_lib_names'): - return self.get_libs(key, default=self._lib_names) - else: - return self.get_libs(key, '') - - def library_extensions(self): - c = customized_ccompiler() - static_exts = [] - if c.compiler_type != 'msvc': - # MSVC doesn't understand binutils - static_exts.append('.a') - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC and others - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - return exts - - def check_libs(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). - """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - return info - - def check_libs2(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks each library for shared or static. 
- """ - exts = self.library_extensions() - info = self._check_libs(lib_dirs, libs, opt_libs, exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - - return info - - def _find_lib(self, lib_dir, lib, exts): - assert is_string(lib_dir) - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix + lib + ext) - if p: - break - if p: - assert len(p) == 1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - lib += '.dll' - if ext == '.lib': - lib = prefix + lib - return lib - - return False - - def _find_libs(self, lib_dirs, libs, exts): - # make sure we preserve the order of libs, as it can be important - found_dirs, found_libs = [], [] - for lib in libs: - for lib_dir in lib_dirs: - found_lib = self._find_lib(lib_dir, lib, exts) - if found_lib: - found_libs.append(found_lib) - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - break - return found_dirs, found_libs - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Find mandatory and optional libs in expected paths. - - Missing optional libraries are silently forgotten. - """ - if not is_sequence(lib_dirs): - lib_dirs = [lib_dirs] - # First, try to find the mandatory libraries - found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) - if len(found_libs) > 0 and len(found_libs) == len(libs): - # Now, check for optional libraries - opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) - found_libs.extend(opt_found_libs) - for lib_dir in opt_found_dirs: - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - info = {'libraries': found_libs, 'library_dirs': found_dirs} - return info - else: - return None - - def combine_paths(self, *args): - """Return a list of existing paths composed by all combinations - of items from the arguments. 
- """ - return combine_paths(*args) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info, **fftw_info) - if djbfft_info: - dict_append(info, **djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - {'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]}] - - def calc_ver_info(self, ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - - opt = self.get_option_single(self.section + '_libs', 'libraries') - libs = self.get_libs(opt, ver_param['libs']) - info = self.check_libs(lib_dirs, libs) - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d, ver_param['includes'])) \ - == len(ver_param['includes']): - dict_append(info, include_dirs=[d]) - flag = 1 - break - if flag: - dict_append(info, define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]} - ] - - -class fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - ] - - -class fftw3_armpl_info(fftw_info): - section = 'fftw3' - dir_env_var = 'ARMPL_DIR' - notfounderror = FFTWNotFoundError - ver_info = [{'name': 'fftw3', - 'libs': ['armpl_lp64_mp'], - 'includes': ['fftw3.h'], - 'macros': [('SCIPY_FFTW3_H', None)]}] - - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw', - 'libs':['drfftw', 'dfftw'], - 'includes':['dfftw.h', 'drfftw.h'], - 'macros':[('SCIPY_DFFTW_H', None)]}] - - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw', - 'libs':['srfftw', 'sfftw'], - 'includes':['sfftw.h', 'srfftw.h'], - 'macros':[('SCIPY_SFFTW_H', None)]}] - - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'fftw threads', - 'libs':['rfftw_threads', 'fftw_threads'], - 'includes':['fftw_threads.h', 'rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] - - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw threads', - 'libs':['drfftw_threads', 'dfftw_threads'], - 'includes':['dfftw_threads.h', 'drfftw_threads.h'], - 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] - - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw threads', - 'libs':['srfftw_threads', 'sfftw_threads'], - 'includes':['sfftw_threads.h', 'srfftw_threads.h'], - 
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] - - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths(d, ['djbfft.a']) - if p: - info = {'extra_objects': p} - break - p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) - if p: - info = {'libraries': ['djbfft'], 'library_dirs': [d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: - dict_append(info, include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H', None)]) - self.set_info(**info) - return - return - - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKLROOT' - _lib_mkl = ['mkl_rt'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT', None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - with open(ld_so_conf) as f: - for d in f: - d = d.strip() - if d: - paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d, 'mkl', '*')) - dirs += glob(os.path.join(d, 'mkl*')) - for sub_dir in dirs: - if os.path.isdir(os.path.join(sub_dir, 'lib')): - return sub_dir - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - from .cpuinfo import cpu - if cpu.is_Itanium(): - plt = '64' - elif cpu.is_Intel() and cpu.is_64bit(): - plt = 'intel64' - else: - plt = '32' - system_info.__init__( - self, - default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], - default_include_dirs=[os.path.join(mklroot, 'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - opt = self.get_option_single('mkl_libs', 'libraries') - mkl_libs = self.get_libs(opt, self._lib_mkl) - info = self.check_libs2(lib_dirs, mkl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - - -class lapack_mkl_info(mkl_info): - pass - - -class blas_mkl_info(mkl_info): - pass - - -class ssl2_info(system_info): - section = 'ssl2' - dir_env_var = 'SSL2_DIR' - # Multi-threaded version. Python itself must be built by Fujitsu compiler. 
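As an aside on the root-directory probing these vendor classes perform, the MKLROOT search in `get_mkl_rootdir` above reduces to the standalone sketch below. The function name is invented for illustration, and the `/etc/ld.so.conf` handling is simplified to plain directory lines, as in the original:

```python
import os
from glob import glob

def find_mkl_root():
    # Mirrors mkl_info.get_mkl_rootdir: an explicit MKLROOT wins outright;
    # otherwise LD_LIBRARY_PATH and /etc/ld.so.conf entries are scanned for
    # an mkl* directory that contains a lib/ subdirectory.
    root = os.environ.get('MKLROOT')
    if root is not None:
        return root
    paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
    if os.path.isfile('/etc/ld.so.conf'):
        with open('/etc/ld.so.conf') as f:
            paths += [line.strip() for line in f if line.strip()]
    for d in paths:
        for sub in glob(os.path.join(d, 'mkl', '*')) + glob(os.path.join(d, 'mkl*')):
            if os.path.isdir(os.path.join(sub, 'lib')):
                return sub
    return None
```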
- _lib_ssl2 = ['fjlapackexsve'] - # Single-threaded version - #_lib_ssl2 = ['fjlapacksve'] - - def get_tcsds_rootdir(self): - tcsdsroot = os.environ.get('TCSDS_PATH', None) - if tcsdsroot is not None: - return tcsdsroot - return None - - def __init__(self): - tcsdsroot = self.get_tcsds_rootdir() - if tcsdsroot is None: - system_info.__init__(self) - else: - system_info.__init__( - self, - default_lib_dirs=[os.path.join(tcsdsroot, 'lib64')], - default_include_dirs=[os.path.join(tcsdsroot, - 'clang-comp/include')]) - - def calc_info(self): - tcsdsroot = self.get_tcsds_rootdir() - - lib_dirs = self.get_lib_dirs() - if lib_dirs is None: - lib_dirs = os.path.join(tcsdsroot, 'lib64') - - incl_dirs = self.get_include_dirs() - if incl_dirs is None: - incl_dirs = os.path.join(tcsdsroot, 'clang-comp/include') - - ssl2_libs = self.get_libs('ssl2_libs', self._lib_ssl2) - - info = self.check_libs2(lib_dirs, ssl2_libs) - if info is None: - return - dict_append(info, - define_macros=[('HAVE_CBLAS', None), - ('HAVE_SSL2', 1)], - include_dirs=incl_dirs,) - self.set_info(**info) - - -class lapack_ssl2_info(ssl2_info): - pass - - -class blas_ssl2_info(ssl2_info): - pass - - - -class armpl_info(system_info): - section = 'armpl' - dir_env_var = 'ARMPL_DIR' - _lib_armpl = ['armpl_lp64_mp'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) - info = self.check_libs2(lib_dirs, armpl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - -class lapack_armpl_info(armpl_info): - pass - -class blas_armpl_info(armpl_info): - pass - - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas', 'cblas'] - if sys.platform[:7] == 'freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - _lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', - 'sse', '3dnow', 'sse2']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - atlas = self.check_libs2(d, atlas_libs, []) - if atlas is not None: - lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) - lapack = self.check_libs2(lib_dirs2, lapack_libs, []) - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info, **lapack) - dict_append(info, **atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info, **atlas) - dict_append(info, - define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) - self.set_info(**info) - return - else: - dict_append(info, **atlas) - dict_append(info, 
define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) - message = textwrap.dedent(""" - ********************************************************************* - Could not find lapack library within the ATLAS installation. - ********************************************************************* - """) - warnings.warn(message, stacklevel=2) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. - lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir, prefix + lapack_name + e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000 * 1024: - message = textwrap.dedent(""" - ********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. - ********************************************************************* - """) % (lapack_lib, sz / 1024) - warnings.warn(message, stacklevel=2) - else: - info['language'] = 'f77' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(info, **atlas_extra_info) - - self.set_info(**info) - - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas', 'cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - - -class atlas_3_10_info(atlas_info): - _lib_names = ['satlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_info(atlas_3_10_info): - _lib_names = ['satlas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_lib', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - 
dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_3_10_threads_info(atlas_3_10_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - - -class lapack_atlas_3_10_info(atlas_3_10_info): - pass - - -class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): - pass - - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('lapack_libs', 'libraries') - lapack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, lapack_libs, []) - if info is None: - return - info['language'] = 'f77' - self.set_info(**info) - - -class lapack_src_info(system_info): - # LAPACK_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. - section = 'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility. 
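The `get_paths` overrides above (`djbfft_info`, `atlas_info`, `lapack_src_info`) all lean on `combine_paths` to expand candidate directories. A minimal re-creation of that helper, simplified on the assumption that each argument is a path component or a list of alternatives and that only glob hits survive:

```python
import os
from glob import glob

def combine_paths(*args):
    # Build every combination of the given components, then keep only the
    # paths that actually exist on disk (glob does the filtering).
    parts = [[a] if isinstance(a, str) else list(a) for a in args if a]
    if not parts:
        return []
    patterns = parts[0]
    for alternatives in parts[1:]:
        patterns = [os.path.join(p, alt) for p in patterns for alt in alternatives]
    return [hit for pattern in patterns for hit in glob(pattern)]

# e.g. combine_paths('/usr/local', ['LAPACK*/SRC', 'SRC']) lists existing
# candidates the way lapack_src_info.get_paths expands its directories.
```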
- allaux = ''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f - oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ - + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ - + ['c%s.f' % f for f in (clasrc).split()] \ - + ['z%s.f' % f for f in (zlasrc).split()] \ - + ['%s.f' % f for f 
in (allaux + oclasrc + ozlasrc).split()] - sources = [os.path.join(src_dir, f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir, '..', 'INSTALL') - sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] - # Lapack 3.2.1: - sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} - - -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - info = {} - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs, - ) - if s and re.search(r'undefined reference to `_gfortran', o, re.M): - s, o = c.get_output(atlas_version_c_text, - libraries=libraries + ['gfortran'], - library_dirs=library_dirs, - ) - if not s: - warnings.warn(textwrap.dedent(""" - ***************************************************** - Linkage with ATLAS requires gfortran. Use - - python setup.py config_fc --fcompiler=gnu95 ... - - when building extension libraries that use ATLAS. - Make sure that -lgfortran is used for C++ extensions. 
-                *****************************************************
-                """), stacklevel=2)
-            dict_append(info, language='f90',
-                        define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
-    except Exception:  # failed to get version from file -- maybe on Windows
-        # look at directory name
-        for o in library_dirs:
-            m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
-            if m:
-                atlas_version = m.group('version')
-            if atlas_version is not None:
-                break
-
-        # final choice --- look at ATLAS_VERSION environment
-        # variable
-        if atlas_version is None:
-            atlas_version = os.environ.get('ATLAS_VERSION', None)
-        if atlas_version:
-            dict_append(info, define_macros=[(
-                'ATLAS_INFO', _c_string_literal(atlas_version))
-            ])
-        else:
-            dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
-        return atlas_version or '?.?.?', info
-
-    if not s:
-        m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
-        if m:
-            atlas_version = m.group('version')
-        if atlas_version is None:
-            if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
-                atlas_version = '3.2.1_pre3.3.6'
-            else:
-                log.info('Status: %d', s)
-                log.info('Output: %s', o)
-
-    elif atlas_version == '3.2.1_pre3.3.6':
-        dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
-    else:
-        dict_append(info, define_macros=[(
-            'ATLAS_INFO', _c_string_literal(atlas_version))
-        ])
-    result = _cached_atlas_version[key] = atlas_version, info
-    return result
-
-
-class lapack_opt_info(system_info):
-    notfounderror = LapackNotFoundError
-
-    # List of all known LAPACK libraries, in the default order
-    lapack_order = ['armpl', 'mkl', 'ssl2', 'openblas', 'flame',
-                    'accelerate', 'atlas', 'lapack']
-    order_env_var_name = 'NPY_LAPACK_ORDER'
-
-    def _calc_info_armpl(self):
-        info = get_info('lapack_armpl')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_mkl(self):
-        info = get_info('lapack_mkl')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_ssl2(self):
-        info = get_info('lapack_ssl2')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_openblas(self):
-        info = get_info('openblas_lapack')
-        if info:
-            self.set_info(**info)
-            return True
-        info = get_info('openblas_clapack')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_flame(self):
-        info = get_info('flame')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_atlas(self):
-        info = get_info('atlas_3_10_threads')
-        if not info:
-            info = get_info('atlas_3_10')
-        if not info:
-            info = get_info('atlas_threads')
-        if not info:
-            info = get_info('atlas')
-        if info:
-            # Figure out if ATLAS has lapack...
-            # If not we need the lapack library, but not BLAS!
-            l = info.get('define_macros', [])
-            if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
-               or ('ATLAS_WITHOUT_LAPACK', None) in l:
-                # Get LAPACK (with possible warnings)
-                # If not found we don't accept anything
-                # since we can't use ATLAS with LAPACK!
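The macro test in `_calc_info_atlas` above is easy to miss; isolated, it amounts to the check below (helper name invented for illustration), which the fallback that continues just after this aside acts on:

```python
def atlas_needs_external_lapack(info):
    # ATLAS builds flagged with either macro ship no usable LAPACK, so a
    # separate LAPACK library must be located before the info is accepted.
    macros = info.get('define_macros', [])
    return (('ATLAS_WITH_LAPACK_ATLAS', None) in macros or
            ('ATLAS_WITHOUT_LAPACK', None) in macros)

# e.g. atlas_needs_external_lapack(
#     {'define_macros': [('ATLAS_WITHOUT_LAPACK', None)]})  -> True
```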
- lapack_info = self._get_info_lapack() - if not lapack_info: - return False - dict_append(info, **lapack_info) - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _get_info_blas(self): - # Default to get the optimized BLAS implementation - info = get_info('blas_opt') - if not info: - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('blas_src') - if not info_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('fblas_src', info_src)]) - return info - - def _get_info_lapack(self): - info = get_info('lapack') - if not info: - warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('lapack_src') - if not info_src: - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('flapack_src', info_src)]) - return info - - def _calc_info_lapack(self): - info = self._get_info_lapack() - if info: - info_blas = self._get_info_blas() - dict_append(info, **info_blas) - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - self.set_info(**info) - return True - return False - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("lapack_opt_info user defined " - "LAPACK order has unacceptable " - "values: {}".format(unknown_order)) - - if 'NPY_LAPACK_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for lapack in lapack_order: - if self._calc_info(lapack): - return - - if 'lapack' not in lapack_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! 
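The order-driven dispatch in `calc_info` above can be restated as a toy sketch. The class and stub results here are hypothetical; the real `_parse_env_order` also validates names and supports a `^`/`!` negation prefix, which this sketch omits:

```python
import os

class _OrderedResolver:
    # Stand-in for lapack_opt_info.calc_info: the environment variable
    # reorders the candidate list, and each name is dispatched to a
    # _calc_info_<name> method that reports success or failure.
    order = ['mkl', 'openblas', 'lapack']
    env_var = 'NPY_LAPACK_ORDER'

    def _calc_info_mkl(self):
        return False   # pretend MKL is absent

    def _calc_info_openblas(self):
        return True    # pretend OpenBLAS is found

    def _calc_info_lapack(self):
        return True

    def resolve(self):
        requested = os.environ.get(self.env_var)
        order = [s.strip() for s in requested.split(',')] if requested else self.order
        for name in order:
            if getattr(self, '_calc_info_%s' % name)():
                return name
        return None

print(_OrderedResolver().resolve())   # -> 'openblas' with the stubs above
```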
- warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class _ilp64_opt_info_mixin: - symbol_suffix = None - symbol_prefix = None - - def _check_info(self, info): - macros = dict(info.get('define_macros', [])) - prefix = macros.get('BLAS_SYMBOL_PREFIX', '') - suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') - - if self.symbol_prefix not in (None, prefix): - return False - - if self.symbol_suffix not in (None, suffix): - return False - - return bool(info) - - -class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): - notfounderror = LapackILP64NotFoundError - lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] - order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' - - def _calc_info(self, name): - print('lapack_ilp64_opt_info._calc_info(name=%s)' % (name)) - info = get_info(name + '_lapack') - if self._check_info(info): - self.set_info(**info) - return True - else: - print('%s_lapack does not exist' % (name)) - return False - - -class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): - # Same as lapack_ilp64_opt_info, but fix symbol names - symbol_prefix = '' - symbol_suffix = '' - - -class lapack64__opt_info(lapack_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class blas_opt_info(system_info): - notfounderror = BlasNotFoundError - # List of all known BLAS libraries, in the default order - - blas_order = ['armpl', 'mkl', 'ssl2', 'blis', 'openblas', - 'accelerate', 'atlas', 'blas'] - order_env_var_name = 'NPY_BLAS_ORDER' - - def _calc_info_armpl(self): - info = get_info('blas_armpl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_mkl(self): - info = get_info('blas_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_ssl2(self): - info = get_info('blas_ssl2') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blis(self): - info = get_info('blis') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_blas_threads') - if not info: - info = get_info('atlas_3_10_blas') - if not info: - info = get_info('atlas_blas_threads') - if not info: - info = get_info('atlas_blas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blas(self): - # Warn about a non-optimized BLAS library - warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) - info = {} - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - - blas = get_info('blas') - if blas: - dict_append(info, **blas) - else: - # Not even BLAS was found! 
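For clarity, the symbol-naming test that `_ilp64_opt_info_mixin` above applies can be restated on its own (function name invented; the `None`-means-accept-anything convention is kept from the original):

```python
def check_ilp64_symbols(info, expect_prefix=None, expect_suffix=None):
    # Restatement of _ilp64_opt_info_mixin._check_info: a None expectation
    # accepts any naming scheme; otherwise the BLAS_SYMBOL_* macros recorded
    # in define_macros must match exactly.
    macros = dict(info.get('define_macros', []))
    if expect_prefix is not None and macros.get('BLAS_SYMBOL_PREFIX', '') != expect_prefix:
        return False
    if expect_suffix is not None and macros.get('BLAS_SYMBOL_SUFFIX', '') != expect_suffix:
        return False
    return bool(info)

# openblas64_ builds define BLAS_SYMBOL_SUFFIX=64_, so
# check_ilp64_symbols(info, expect_prefix='', expect_suffix='64_')
# mirrors what blas64__opt_info requires.
```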
- warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - - blas_src = get_info('blas_src') - if not blas_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return False - dict_append(info, libraries=[('fblas_src', blas_src)]) - - self.set_info(**info) - return True - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() - if 'NPY_CBLAS_LIBS' in os.environ: - info['define_macros'].append(('HAVE_CBLAS', None)) - info['extra_link_args'].extend( - os.environ['NPY_CBLAS_LIBS'].split()) - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) - - if 'NPY_BLAS_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for blas in blas_order: - if self._calc_info(blas): - return - - if 'blas' not in blas_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): - notfounderror = BlasILP64NotFoundError - blas_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] - order_env_var_name = 'NPY_BLAS_ILP64_ORDER' - - def _calc_info(self, name): - info = get_info(name) - if self._check_info(info): - self.set_info(**info) - return True - return False - - -class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '' - - -class blas64__opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class cblas_info(system_info): - section = 'cblas' - dir_env_var = 'CBLAS' - # No default as it's used only in blas_info - _lib_names = [] - notfounderror = BlasNotFoundError - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blas_libs', 'libraries') - blas_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, blas_libs, []) - if info is None: - return - else: - info['include_dirs'] = self.get_include_dirs() - if platform.system() == 'Windows': - # The check for windows is needed because get_cblas_libs uses the - # same compiler that was used to compile Python and msvc is - # often not installed when mingw is being used. This rough - # treatment is not desirable, but windows is tricky. - info['language'] = 'f77' # XXX: is it generally true? 
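`blas_info` and the `cblas_info` section it consults are both driven by site.cfg. A concrete fragment may help; the paths are invented, and parsing is shown with `read_string` rather than the file search the classes actually perform:

```python
from configparser import ConfigParser

SITE_CFG = """\
[blas]
library_dirs = /opt/blas/lib
blas_libs = blas

[cblas]
cblas_libs = cblas
"""

cp = ConfigParser()
cp.read_string(SITE_CFG)
# blas_info.calc_info resolves these via get_lib_dirs/get_libs:
print(cp.get('blas', 'blas_libs'))     # -> blas
print(cp.get('cblas', 'cblas_libs'))   # -> cblas
```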
-            # If cblas is given as an option, use those
-            cblas_info_obj = cblas_info()
-            cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')
-            cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)
-            if cblas_libs:
-                info['libraries'] = cblas_libs + blas_libs
-                info['define_macros'] = [('HAVE_CBLAS', None)]
-        else:
-            lib = self.get_cblas_libs(info)
-            if lib is not None:
-                info['language'] = 'c'
-                info['libraries'] = lib
-                info['define_macros'] = [('HAVE_CBLAS', None)]
-        self.set_info(**info)
-
-    def get_cblas_libs(self, info):
-        """ Check whether we can link with CBLAS interface
-
-        This method will search through several combinations of libraries
-        to check whether CBLAS is present:
-
-        1. Libraries in ``info['libraries']``, as is
-        2. As 1. but also explicitly adding ``'cblas'`` as a library
-        3. As 1. but also explicitly adding ``'blas'`` as a library
-        4. Check only library ``'cblas'``
-        5. Check only library ``'blas'``
-
-        Parameters
-        ----------
-        info : dict
-           system information dictionary for compilation and linking
-
-        Returns
-        -------
-        libraries : list of str or None
-            a list of libraries that enables the use of CBLAS interface.
-            Returns None if not found or a compilation error occurs.
-
-            Since 1.17 returns a list.
-        """
-        # primitive cblas check by looking for the header and trying to link
-        # cblas or blas
-        c = customized_ccompiler()
-        tmpdir = tempfile.mkdtemp()
-        s = textwrap.dedent("""\
-            #include <cblas.h>
-            int main(int argc, const char *argv[])
-            {
-                double a[4] = {1,2,3,4};
-                double b[4] = {5,6,7,8};
-                return cblas_ddot(4, a, 1, b, 1) > 10;
-            }""")
-        src = os.path.join(tmpdir, 'source.c')
-        try:
-            with open(src, 'w') as f:
-                f.write(s)
-
-            try:
-                # check we can compile (find headers)
-                obj = c.compile([src], output_dir=tmpdir,
-                                include_dirs=self.get_include_dirs())
-            except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):
-                return None
-
-            # check we can link (find library)
-            # some systems have separate cblas and blas libs.
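The five link attempts enumerated in the `get_cblas_libs` docstring above come down to generating these candidate library lists (helper name hypothetical), which the loop that follows then tries in order:

```python
def cblas_link_candidates(libraries):
    # The order matters: the configured libraries are tried as-is before
    # 'cblas'/'blas' are prepended, and bare 'cblas'/'blas' come last.
    return [libraries,
            ['cblas'] + libraries,
            ['blas'] + libraries,
            ['cblas'],
            ['blas']]

print(cblas_link_candidates(['openblas']))
# -> [['openblas'], ['cblas', 'openblas'], ['blas', 'openblas'],
#     ['cblas'], ['blas']]
```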
- for libs in [info['libraries'], ['cblas'] + info['libraries'], - ['blas'] + info['libraries'], ['cblas'], ['blas']]: - try: - c.link_executable(obj, os.path.join(tmpdir, "a.out"), - libraries=libs, - library_dirs=info['library_dirs'], - extra_postargs=info.get('extra_link_args', [])) - return libs - except distutils.ccompiler.LinkError: - pass - finally: - shutil.rmtree(tmpdir) - return None - - -class openblas_info(blas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = [] - notfounderror = BlasNotFoundError - - @property - def symbol_prefix(self): - try: - return self.cp.get(self.section, 'symbol_prefix') - except NoOptionError: - return '' - - @property - def symbol_suffix(self): - try: - return self.cp.get(self.section, 'symbol_suffix') - except NoOptionError: - return '' - - def _calc_info(self): - c = customized_ccompiler() - - lib_dirs = self.get_lib_dirs() - - # Prefer to use libraries over openblas_libs - opt = self.get_option_single('openblas_libs', 'libraries') - openblas_libs = self.get_libs(opt, self._lib_names) - - info = self.check_libs(lib_dirs, openblas_libs, []) - - if c.compiler_type == "msvc" and info is None: - from numpy.distutils.fcompiler import new_fcompiler - f = new_fcompiler(c_compiler=c) - if f and f.compiler_type == 'gnu95': - # Try gfortran-compatible library files - info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) - # Skip lapack check, we'd need build_ext to do it - skip_symbol_check = True - elif info: - skip_symbol_check = False - info['language'] = 'c' - - if info is None: - return None - - # Add extra info for OpenBLAS - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if not (skip_symbol_check or self.check_symbols(info)): - return None - - info['define_macros'] = [('HAVE_CBLAS', None)] - if self.symbol_prefix: - info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] - if self.symbol_suffix: - info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] - - return info - - def calc_info(self): - info = self._calc_info() - if info is not None: - self.set_info(**info) - - def check_msvc_gfortran_libs(self, library_dirs, libraries): - # First, find the full path to each library directory - library_paths = [] - for library in libraries: - for library_dir in library_dirs: - # MinGW static ext will be .a - fullpath = os.path.join(library_dir, library + '.a') - if os.path.isfile(fullpath): - library_paths.append(fullpath) - break - else: - return None - - # Generate numpy.distutils virtual static library file - basename = self.__class__.__name__ - tmpdir = os.path.join(os.getcwd(), 'build', basename) - if not os.path.isdir(tmpdir): - os.makedirs(tmpdir) - - info = {'library_dirs': [tmpdir], - 'libraries': [basename], - 'language': 'f77'} - - fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') - fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') - with open(fake_lib_file, 'w') as f: - f.write("\n".join(library_paths)) - with open(fake_clib_file, 'w') as f: - pass - - return info - - def check_symbols(self, info): - res = False - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - - prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - calls = "\n".join("%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - s = textwrap.dedent("""\ - %(prototypes)s - int main(int 
argc, const char *argv[]) - { - %(calls)s - return 0; - }""") % dict(prototypes=prototypes, calls=calls) - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - try: - extra_args = info['extra_link_args'] - except Exception: - extra_args = [] - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - res = True - except distutils.ccompiler.LinkError: - res = False - finally: - shutil.rmtree(tmpdir) - return res - -class openblas_lapack_info(openblas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = ['zungqr_'] - notfounderror = BlasNotFoundError - -class openblas_clapack_info(openblas_lapack_info): - _lib_names = ['openblas', 'lapack'] - -class openblas_ilp64_info(openblas_info): - section = 'openblas_ilp64' - dir_env_var = 'OPENBLAS_ILP64' - _lib_names = ['openblas64'] - _require_symbols = ['dgemm_', 'cblas_dgemm'] - notfounderror = BlasILP64NotFoundError - - def _calc_info(self): - info = super()._calc_info() - if info is not None: - info['define_macros'] += [('HAVE_BLAS_ILP64', None)] - return info - -class openblas_ilp64_lapack_info(openblas_ilp64_info): - _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] - - def _calc_info(self): - info = super()._calc_info() - if info: - info['define_macros'] += [('HAVE_LAPACKE', None)] - return info - -class openblas64__info(openblas_ilp64_info): - # ILP64 Openblas, with default symbol suffix - section = 'openblas64_' - dir_env_var = 'OPENBLAS64_' - _lib_names = ['openblas64_'] - symbol_suffix = '64_' - symbol_prefix = '' - -class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): - pass - -class blis_info(blas_info): - section = 'blis' - dir_env_var = 'BLIS' - _lib_names = ['blis'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blis_libs', 'libraries') - blis_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs2(lib_dirs, blis_libs, []) - if info is None: - return - - # Add include dirs - incl_dirs = self.get_include_dirs() - dict_append(info, - language='c', - define_macros=[('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - - -class flame_info(system_info): - """ Usage of libflame for LAPACK operations - - This requires libflame to be compiled with lapack wrappers: - - ./configure --enable-lapack2flame ... - - Be aware that libflame 5.1.0 has some missing names in the shared library, so - if you have problems, try the static flame library. 
- """ - section = 'flame' - _lib_names = ['flame'] - notfounderror = FlameNotFoundError - - def check_embedded_lapack(self, info): - """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - void zungqr_(); - int main(int argc, const char *argv[]) - { - zungqr_(); - return 0; - }""") - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - extra_args = info.get('extra_link_args', []) - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - return True - except distutils.ccompiler.LinkError: - return False - finally: - shutil.rmtree(tmpdir) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - flame_libs = self.get_libs('libraries', self._lib_names) - - info = self.check_libs2(lib_dirs, flame_libs, []) - if info is None: - return - - # Add the extra flag args to info - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if self.check_embedded_lapack(info): - # check if the user has supplied all information required - self.set_info(**info) - else: - # Try and get the BLAS lib to see if we can get it to work - blas_info = get_info('blas_opt') - if not blas_info: - # since we already failed once, this ain't going to work either - return - - # Now we need to merge the two dictionaries - for key in blas_info: - if isinstance(blas_info[key], list): - info[key] = info.get(key, []) + blas_info[key] - elif isinstance(blas_info[key], tuple): - info[key] = info.get(key, ()) + blas_info[key] - else: - info[key] = info.get(key, '') + blas_info[key] - - # Now check again - if self.check_embedded_lapack(info): - self.set_info(**info) - - -class accelerate_info(system_info): - section = 'accelerate' - _lib_names = ['accelerate', 'veclib'] - notfounderror = BlasNotFoundError - - def calc_info(self): - # Make possible to enable/disable from config file/env var - libraries = os.environ.get('ACCELERATE') - if libraries: - libraries = [libraries] - else: - libraries = self.get_libs('libraries', self._lib_names) - libraries = [lib.strip().lower() for lib in libraries] - - if (sys.platform == 'darwin' and - not os.getenv('_PYTHON_HOST_PLATFORM', None)): - # Use the system BLAS from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if (os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/') and - 'accelerate' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif (os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/') and - 'veclib' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - - if args: - macros = [ - ('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None), - ('ACCELERATE_NEW_LAPACK', None), - ] - if(os.getenv('NPY_USE_BLAS_ILP64', None)): - print('Setting HAVE_BLAS_ILP64') - macros += [ - ('HAVE_BLAS_ILP64', None), - ('ACCELERATE_LAPACK_ILP64', None), - ] - 
self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=macros) - - return - -class accelerate_lapack_info(accelerate_info): - def _calc_info(self): - return super()._calc_info() - -class blas_src_info(system_info): - # BLAS_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. - section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['blas'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir, f + '.f') \ - for f in (blas1 + blas2 + blas3).split()] - #XXX: should we check here actual existence of source files? 
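The routine-name lists above (`blas1`/`blas2`/`blas3`) are turned into source paths by a small expansion that also answers the XXX comment: only files that exist survive. A sketch of that step, with a hypothetical helper name:

```python
import os

def fortran_sources(src_dir, names):
    # How blas_src_info.calc_info turns whitespace-separated routine names
    # into candidate .f paths, dropping any file missing from this
    # particular BLAS source tree.
    candidates = [os.path.join(src_dir, name + '.f') for name in names.split()]
    return [path for path in candidates if os.path.isfile(path)]

# e.g. fortran_sources('/tmp/blas', 'daxpy ddot dgemm')
```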
- sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - _lib_names = ['X11'] - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - opt = self.get_option_single('x11_libs', 'libraries') - x11_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, x11_libs, []) - if info is None: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name == 'lib': - break - prefix.append(name) - - # Ask numpy for its own include path before attempting - # anything else - try: - include_dirs.append(getattr(module, 'get_include')()) - except AttributeError: - pass - - include_dirs.append(sysconfig.get_path('include')) - except ImportError: - pass - py_incl_dir = sysconfig.get_path('include') - include_dirs.append(py_incl_dir) - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in include_dirs: - include_dirs.append(py_pincl_dir) - for d in default_include_dirs: - d = os.path.join(d, os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def calc_info(self): - try: - module = __import__(self.modulename) - except ImportError: - return - info = {} - macros = [] - for v in ['__version__', 'version']: - vrs = getattr(module, v, None) - if vrs is None: - continue - macros = [(self.modulename.upper() + '_VERSION', - _c_string_literal(vrs)), - (self.modulename.upper(), None)] - break - dict_append(info, define_macros=macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - - -class numerix_info(system_info): - section = 'numerix' - - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. 
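The useful core of the `_numpy_info.__init__` probe above survives in modern NumPy: ask the module itself for its header directory first, then fall back to the interpreter's own include paths. A minimal sketch (assumes numpy is importable):

```python
import sysconfig
import numpy

# Header search order assembled the way _numpy_info does: the module's own
# get_include() first, then the interpreter's include and platinclude dirs.
include_dirs = [numpy.get_include(),
                sysconfig.get_path('include'),
                sysconfig.get_path('platinclude')]
print(include_dirs)
```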
- if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy # noqa: F401 - which = "numpy", "defaulted" - except ImportError as e: - msg1 = str(e) - try: - import Numeric # noqa: F401 - which = "numeric", "defaulted" - except ImportError as e: - msg2 = str(e) - try: - import numarray # noqa: F401 - which = "numarray", "defaulted" - except ImportError as e: - msg3 = str(e) - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." % (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') - self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], - include_dirs=[f2py_dir]) - return - - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['boost*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', - 'module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dirs = [sysconfig.get_path('include')] - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in py_incl_dirs: - py_incl_dirs.append(py_pincl_dir) - srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') - bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) - info = {'libraries': [('boost_python_src', - {'include_dirs': [src_dir] + py_incl_dirs, - 'sources':bpl_srcs} - )], - 'include_dirs': [src_dir], - } - if info: - self.set_info(**info) - return - - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['agg2*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): - src_dir = d - break - if not src_dir: - return - if sys.platform == 'win32': - agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', - 'win32', 'agg_win32_bmp.cpp')) - else: - agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) - agg2_srcs += [os.path.join(src_dir, 'src', 'platform', - 'X11', - 'agg_platform_support.cpp')] - - info = {'libraries': - [('agg2_src', - {'sources': agg2_srcs, - 'include_dirs': [os.path.join(src_dir, 'include')], - } - )], - 'include_dirs': [os.path.join(src_dir, 'include')], - } - if info: - self.set_info(**info) - return - - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - return 
os.environ[self.config_env_var] - return self.default_config_exe - - def get_config_output(self, config_exe, option): - cmd = config_exe + ' ' + self.append_config_exe + ' ' + option - try: - o = subprocess.check_output(cmd) - except (OSError, subprocess.CalledProcessError): - pass - else: - o = filepath_from_subprocess_output(o) - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' \ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe, self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - _c_string_literal(version))) - if self.version_macro_name: - macros.append((self.version_macro_name + '_%s' - % (version.replace('.', '_')), None)) - if self.release_macro_name: - release = self.get_config_output(config_exe, '--release') - if release: - macros.append((self.release_macro_name + '_%s' - % (release.replace('.', '_')), None)) - opts = self.get_config_output(config_exe, '--libs') - if opts: - for opt in opts.split(): - if opt[:2] == '-l': - libraries.append(opt[2:]) - elif opt[:2] == '-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe, self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2] == '-I': - include_dirs.append(opt[2:]) - elif opt[:2] == '-D': - if '=' in opt: - n, v = opt[2:].split('=') - macros.append((n, v)) - else: - macros.append((opt[2:], None)) - else: - extra_compile_args.append(opt) - if macros: - dict_append(info, define_macros=macros) - if libraries: - dict_append(info, libraries=libraries) - if library_dirs: - dict_append(info, library_dirs=library_dirs) - if include_dirs: - dict_append(info, include_dirs=include_dirs) - if extra_link_args: - dict_append(info, extra_link_args=extra_link_args) - if extra_compile_args: - dict_append(info, extra_compile_args=extra_compile_args) - if info: - self.set_info(**info) - return - - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - - -class gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - - -class gdk_x11_2_info(_pkg_config_info): - section = 'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - 
version_macro_name = 'XFT_VERSION' - - -class freetype2_info(_pkg_config_info): - section = 'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('amd_libs', 'libraries') - amd_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, amd_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, 'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H', None)], - swig_opts=['-I' + inc_dir]) - - self.set_info(**info) - return - - -class umfpack_info(system_info): - section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('umfpack_libs', 'libraries') - umfpack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, umfpack_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H', None)], - swig_opts=['-I' + inc_dir]) - - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - - -def combine_paths(*args, **kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. 
- """ - r = [] - for a in args: - if not a: - continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: - return [] - if len(args) == 1: - result = reduce(lambda a, b: a + b, map(glob, args[0]), []) - elif len(args) == 2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0, a1))) - else: - result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} -inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} - - -def dict_append(d, **kws): - languages = [] - for k, v in kws.items(): - if k == 'language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs', 'include_dirs', - 'extra_compile_args', 'extra_link_args', - 'runtime_library_dirs', 'define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l, 0) for l in languages])] - d['language'] = l - return - - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.items(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - # we don't need the result, but we want - # the side effect of printing diagnostics - conf.get_info() - if show_only: - log.info('Info classes not defined: %s', ','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git a/numpy/distutils/tests/__init__.py b/numpy/distutils/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/numpy/distutils/tests/test_build_ext.py b/numpy/distutils/tests/test_build_ext.py deleted file mode 100644 index 7124cc407a2f..000000000000 --- a/numpy/distutils/tests/test_build_ext.py +++ /dev/null @@ -1,74 +0,0 @@ -'''Tests for numpy.distutils.build_ext.''' - -import os -import subprocess -import sys -from textwrap import indent, dedent -import pytest -from numpy.testing import IS_WASM - -@pytest.mark.skipif(IS_WASM, reason="cannot start subprocess in wasm") -@pytest.mark.slow -def test_multi_fortran_libs_link(tmp_path): - ''' - Ensures multiple "fake" static libraries are correctly linked. - see gh-18295 - ''' - - # We need to make sure we actually have an f77 compiler. 
- # This is nontrivial, so we'll borrow the utilities - # from f2py tests: - from numpy.distutils.tests.utilities import has_f77_compiler - if not has_f77_compiler(): - pytest.skip('No F77 compiler found') - - # make some dummy sources - with open(tmp_path / '_dummy1.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_one() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy2.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_two() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy.c', 'w') as fid: - # doesn't need to load - just needs to exist - fid.write('int PyInit_dummyext;') - - # make a setup file - with open(tmp_path / 'setup.py', 'w') as fid: - srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..') - fid.write(dedent(f'''\ - def configuration(parent_package="", top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration("", parent_package, top_path) - config.add_library("dummy1", sources=["_dummy1.f"]) - config.add_library("dummy2", sources=["_dummy2.f"]) - config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"]) - return config - - - if __name__ == "__main__": - import sys - sys.path.insert(0, r"{srctree}") - from numpy.distutils.core import setup - setup(**configuration(top_path="").todict())''')) - - # build the test extension and "install" into a temporary directory - build_dir = tmp_path - subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', - '--prefix', str(tmp_path / 'installdir'), - '--record', str(tmp_path / 'tmp_install_log.txt'), - ], - cwd=str(build_dir), - ) - # get the path to the so - so = None - with open(tmp_path /'tmp_install_log.txt') as fid: - for line in fid: - if 'dummyext' in line: - so = line.strip() - break - assert so is not None diff --git a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py deleted file mode 100644 index 3714aea0e12e..000000000000 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ /dev/null @@ -1,808 +0,0 @@ -import re, textwrap, os -from os import sys, path -from distutils.errors import DistutilsError - -is_standalone = __name__ == '__main__' and __package__ is None -if is_standalone: - import unittest, contextlib, tempfile, shutil - sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) - from ccompiler_opt import CCompilerOpt - - # from numpy/testing/_private/utils.py - @contextlib.contextmanager - def tempdir(*args, **kwargs): - tmpdir = tempfile.mkdtemp(*args, **kwargs) - try: - yield tmpdir - finally: - shutil.rmtree(tmpdir) - - def assert_(expr, msg=''): - if not expr: - raise AssertionError(msg) -else: - from numpy.distutils.ccompiler_opt import CCompilerOpt - from numpy.testing import assert_, tempdir - -# architectures and compilers to test -arch_compilers = dict( - x86 = ("gcc", "clang", "icc", "iccw", "msvc"), - x64 = ("gcc", "clang", "icc", "iccw", "msvc"), - ppc64 = ("gcc", "clang"), - ppc64le = ("gcc", "clang"), - armhf = ("gcc", "clang"), - aarch64 = ("gcc", "clang", "fcc"), - s390x = ("gcc", "clang"), - noarch = ("gcc",) -) - -class FakeCCompilerOpt(CCompilerOpt): - fake_info = "" - def __init__(self, trap_files="", trap_flags="", *args, **kwargs): - self.fake_trap_files = trap_files - self.fake_trap_flags = trap_flags - CCompilerOpt.__init__(self, None, **kwargs) - - def __repr__(self): - return textwrap.dedent("""\ - <<<< - march : {} - compiler : {} - ---------------- - {} - >>>> - 
""").format(self.cc_march, self.cc_name, self.report()) - - def dist_compile(self, sources, flags, **kwargs): - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - if self.fake_trap_files: - for src in sources: - if re.match(self.fake_trap_files, src): - self.dist_error("source is trapped by a fake interface") - if self.fake_trap_flags: - for f in flags: - if re.match(self.fake_trap_flags, f): - self.dist_error("flag is trapped by a fake interface") - # fake objects - return zip(sources, [' '.join(flags)] * len(sources)) - - def dist_info(self): - return FakeCCompilerOpt.fake_info - - @staticmethod - def dist_log(*args, stderr=False): - pass - -class _Test_CCompilerOpt: - arch = None # x86_64 - cc = None # gcc - - def setup_class(self): - FakeCCompilerOpt.conf_nocache = True - self._opt = None - - def nopt(self, *args, **kwargs): - FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") - return FakeCCompilerOpt(*args, **kwargs) - - def opt(self): - if not self._opt: - self._opt = self.nopt() - return self._opt - - def march(self): - return self.opt().cc_march - - def cc_name(self): - return self.opt().cc_name - - def get_targets(self, targets, groups, **kwargs): - FakeCCompilerOpt.conf_target_groups = groups - opt = self.nopt( - cpu_baseline=kwargs.get("baseline", "min"), - cpu_dispatch=kwargs.get("dispatch", "max"), - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - with tempdir() as tmpdir: - file = os.path.join(tmpdir, "test_targets.c") - with open(file, 'w') as f: - f.write(targets) - gtargets = [] - gflags = {} - fake_objects = opt.try_dispatch([file]) - for source, flags in fake_objects: - gtar = path.basename(source).split('.')[1:-1] - glen = len(gtar) - if glen == 0: - gtar = "baseline" - elif glen == 1: - gtar = gtar[0].upper() - else: - # converting multi-target into parentheses str format to be equivalent - # to the configuration statements syntax. 
- gtar = ('('+' '.join(gtar)+')').upper() - gtargets.append(gtar) - gflags[gtar] = flags - - has_baseline, targets = opt.sources_status[file] - targets = targets + ["baseline"] if has_baseline else targets - # convert tuple that represent multi-target into parentheses str format - targets = [ - '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar - for tar in targets - ] - if len(targets) != len(gtargets) or not all(t in gtargets for t in targets): - raise AssertionError( - "'sources_status' returns different targets than the compiled targets\n" - "%s != %s" % (targets, gtargets) - ) - # return targets from 'sources_status' since the order is matters - return targets, gflags - - def arg_regex(self, **kwargs): - map2origin = dict( - x64 = "x86", - ppc64le = "ppc64", - aarch64 = "armhf", - clang = "gcc", - ) - march = self.march(); cc_name = self.cc_name() - map_march = map2origin.get(march, march) - map_cc = map2origin.get(cc_name, cc_name) - for key in ( - march, cc_name, map_march, map_cc, - march + '_' + cc_name, - map_march + '_' + cc_name, - march + '_' + map_cc, - map_march + '_' + map_cc, - ) : - regex = kwargs.pop(key, None) - if regex is not None: - break - if regex: - if isinstance(regex, dict): - for k, v in regex.items(): - if v[-1:] not in ')}$?\\.+*': - regex[k] = v + '$' - else: - assert(isinstance(regex, str)) - if regex[-1:] not in ')}$?\\.+*': - regex += '$' - return regex - - def expect(self, dispatch, baseline="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - features = ' '.join(opt.cpu_dispatch_names()) - if not match: - if len(features) != 0: - raise AssertionError( - 'expected empty features, not "%s"' % features - ) - return - if not re.match(match, features, re.IGNORECASE): - raise AssertionError( - 'dispatch features "%s" not match "%s"' % (features, match) - ) - - def expect_baseline(self, baseline, dispatch="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - features = ' '.join(opt.cpu_baseline_names()) - if not match: - if len(features) != 0: - raise AssertionError( - 'expected empty features, not "%s"' % features - ) - return - if not re.match(match, features, re.IGNORECASE): - raise AssertionError( - 'baseline features "%s" not match "%s"' % (features, match) - ) - - def expect_flags(self, baseline, dispatch="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - flags = ' '.join(opt.cpu_baseline_flags()) - if not match: - if len(flags) != 0: - raise AssertionError( - 'expected empty flags not "%s"' % flags - ) - return - if not re.match(match, flags): - raise AssertionError( - 'flags "%s" not match "%s"' % (flags, match) - ) - - def expect_targets(self, targets, groups={}, **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs) - targets = ' '.join(targets) - if not match: - if len(targets) != 0: - raise AssertionError( - 'expected empty targets, not "%s"' % targets - ) - return - if not re.match(match, targets, 
re.IGNORECASE): - raise AssertionError( - 'targets "%s" not match "%s"' % (targets, match) - ) - - def expect_target_flags(self, targets, groups={}, **kwargs): - match_dict = self.arg_regex(**kwargs) - if match_dict is None: - return - assert(isinstance(match_dict, dict)) - _, tar_flags = self.get_targets(targets=targets, groups=groups) - - for match_tar, match_flags in match_dict.items(): - if match_tar not in tar_flags: - raise AssertionError( - 'expected to find target "%s"' % match_tar - ) - flags = tar_flags[match_tar] - if not match_flags: - if len(flags) != 0: - raise AssertionError( - 'expected to find empty flags in target "%s"' % match_tar - ) - if not re.match(match_flags, flags): - raise AssertionError( - '"%s" flags "%s" not match "%s"' % (match_tar, flags, match_flags) - ) - - def test_interface(self): - wrong_arch = "ppc64" if self.arch != "ppc64" else "x86" - wrong_cc = "clang" if self.cc != "clang" else "icc" - opt = self.opt() - assert_(getattr(opt, "cc_on_" + self.arch)) - assert_(not getattr(opt, "cc_on_" + wrong_arch)) - assert_(getattr(opt, "cc_is_" + self.cc)) - assert_(not getattr(opt, "cc_is_" + wrong_cc)) - - def test_args_empty(self): - for baseline, dispatch in ( - ("", "none"), - (None, ""), - ("none +none", "none - none"), - ("none -max", "min - max"), - ("+vsx2 -VSX2", "vsx avx2 avx512f -max"), - ("max -vsx - avx + avx512f neon -MAX ", - "min -min + max -max -vsx + avx2 -avx2 +NONE") - ) : - opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) - assert(len(opt.cpu_baseline_names()) == 0) - assert(len(opt.cpu_dispatch_names()) == 0) - - def test_args_validation(self): - if self.march() == "unknown": - return - # check sanity of argument's validation - for baseline, dispatch in ( - ("unkown_feature - max +min", "unknown max min"), # unknowing features - ("#avx2", "$vsx") # groups and polices aren't acceptable - ) : - try: - self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) - raise AssertionError("excepted an exception for invalid arguments") - except DistutilsError: - pass - - def test_skip(self): - # only takes what platform supports and skip the others - # without casing exceptions - self.expect( - "sse vsx neon", - x86="sse", ppc64="vsx", armhf="neon", unknown="" - ) - self.expect( - "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd", - x86 = "sse41 avx avx2", - ppc64 = "vsx2 vsx3", - armhf = "neon_vfpv4 asimd", - unknown = "" - ) - # any features in cpu_dispatch must be ignored if it's part of baseline - self.expect( - "sse neon vsx", baseline="sse neon vsx", - x86="", ppc64="", armhf="" - ) - self.expect( - "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp", - x86="", ppc64="", armhf="" - ) - - def test_implies(self): - # baseline combining implied features, so we count - # on it instead of testing 'feature_implies()'' directly - self.expect_baseline( - "fma3 avx2 asimd vsx3", - # .* between two spaces can validate features in between - x86 = "sse .* sse41 .* fma3.*avx2", - ppc64 = "vsx vsx2 vsx3", - armhf = "neon neon_fp16 neon_vfpv4 asimd" - ) - """ - special cases - """ - # in icc and msvc, FMA3 and AVX2 can't be separated - # both need to implies each other, same for avx512f & cd - for f0, f1 in ( - ("fma3", "avx2"), - ("avx512f", "avx512cd"), - ): - diff = ".* sse42 .* %s .*%s$" % (f0, f1) - self.expect_baseline(f0, - x86_gcc=".* sse42 .* %s$" % f0, - x86_icc=diff, x86_iccw=diff - ) - self.expect_baseline(f1, - x86_gcc=".* avx .* %s$" % f1, - x86_icc=diff, x86_iccw=diff - ) - # in msvc, following features can't be separated too - for f in 
(("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")): - for ff in f: - self.expect_baseline(ff, - x86_msvc=".*%s" % ' '.join(f) - ) - - # in ppc64le VSX and VSX2 can't be separated - self.expect_baseline("vsx", ppc64le="vsx vsx2") - # in aarch64 following features can't be separated - for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"): - self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd") - - def test_args_options(self): - # max & native - for o in ("max", "native"): - if o == "native" and self.cc_name() == "msvc": - continue - self.expect(o, - trap_files=".*cpu_(sse|vsx|neon|vx).c", - x86="", ppc64="", armhf="", s390x="" - ) - self.expect(o, - trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c", - x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16", - aarch64="", ppc64le="", s390x="vx" - ) - self.expect(o, - trap_files=".*cpu_(popcnt|vsx3).c", - x86="sse .* sse41", ppc64="vsx vsx2", - armhf="neon neon_fp16 .* asimd .*", - s390x="vx vxe vxe2" - ) - self.expect(o, - x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in icc, xop and fam4 aren't supported - x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in msvc, avx512_knl avx512_knm aren't supported - x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*", - armhf=".* asimd asimdhp asimddp .*", - ppc64="vsx vsx2 vsx3 vsx4.*", - s390x="vx vxe vxe2.*" - ) - # min - self.expect("min", - x86="sse sse2", x64="sse sse2 sse3", - armhf="", aarch64="neon neon_fp16 .* asimd", - ppc64="", ppc64le="vsx vsx2", s390x="" - ) - self.expect( - "min", trap_files=".*cpu_(sse2|vsx2).c", - x86="", ppc64le="" - ) - # an exception must triggered if native flag isn't supported - # when option "native" is activated through the args - try: - self.expect("native", - trap_flags=".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", - x86=".*", ppc64=".*", armhf=".*", s390x=".*", aarch64=".*", - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_flags(self): - self.expect_flags( - "sse sse2 vsx vsx2 neon neon_fp16 vx vxe", - x86_gcc="-msse -msse2", x86_icc="-msse -msse2", - x86_iccw="/arch:SSE2", - x86_msvc="/arch:SSE2" if self.march() == "x86" else "", - ppc64_gcc= "-mcpu=power8", - ppc64_clang="-mcpu=power8", - armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", - aarch64="", - s390x="-mzvector -march=arch12" - ) - # testing normalize -march - self.expect_flags( - "asimd", - aarch64="", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd" - ) - self.expect_flags( - "asimdhp", - aarch64_gcc=r"-march=armv8.2-a\+fp16", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16" - ) - self.expect_flags( - "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod" - ) - self.expect_flags( - # asimdfhm implies asimdhp - "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml" - ) - self.expect_flags( - "asimddp asimdhp asimdfhm", - aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml" - ) - self.expect_flags( - "vx vxe vxe2", - s390x=r"-mzvector -march=arch13" - ) - - def test_targets_exceptions(self): - for targets in ( - "bla bla", "/*@targets", - "/*@targets */", - "/*@targets unknown */", - "/*@targets $unknown_policy avx2 */", - "/*@targets #unknown_group avx2 */", - "/*@targets $ */", - "/*@targets # vsx */", - "/*@targets #$ vsx */", - 
"/*@targets vsx avx2 ) */", - "/*@targets vsx avx2 (avx2 */", - "/*@targets vsx avx2 () */", - "/*@targets vsx avx2 ($autovec) */", # no features - "/*@targets vsx avx2 (xxx) */", - "/*@targets vsx avx2 (baseline) */", - ) : - try: - self.expect_targets( - targets, - x86="", armhf="", ppc64="", s390x="" - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_targets_syntax(self): - for targets in ( - "/*@targets $keep_baseline sse vsx neon vx*/", - "/*@targets,$keep_baseline,sse,vsx,neon vx*/", - "/*@targets*$keep_baseline*sse*vsx*neon*vx*/", - """ - /* - ** @targets - ** $keep_baseline, sse vsx,neon, vx - */ - """, - """ - /* - ************@targets**************** - ** $keep_baseline, sse vsx, neon, vx - ************************************ - */ - """, - """ - /* - /////////////@targets///////////////// - //$keep_baseline//sse//vsx//neon//vx - ///////////////////////////////////// - */ - """, - """ - /* - @targets - $keep_baseline - SSE VSX NEON VX*/ - """ - ) : - self.expect_targets(targets, - x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown="" - ) - - def test_targets(self): - # test skipping baseline features - self.expect_targets( - """ - /*@targets - sse sse2 sse41 avx avx2 avx512f - vsx vsx2 vsx3 vsx4 - neon neon_fp16 asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="avx vsx2 asimd vx vxe", - x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3", - s390x="vxe2" - ) - # test skipping non-dispatch features - self.expect_targets( - """ - /*@targets - sse41 avx avx2 avx512f - vsx2 vsx3 vsx4 - asimd asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2", - x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2" - ) - # test skipping features that not supported - self.expect_targets( - """ - /*@targets - sse2 sse41 avx2 avx512f - vsx2 vsx3 vsx4 - neon asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", - trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c", - x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon", - s390x="vxe vx" - ) - # test skipping features that implies each other - self.expect_targets( - """ - /*@targets - sse sse2 avx fma3 avx2 avx512f avx512cd - vsx vsx2 vsx3 - neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp - asimddp asimdfhm - */ - """, - baseline="", - x86_gcc="avx512cd avx512f avx2 fma3 avx sse2", - x86_msvc="avx512cd avx2 avx sse2", - x86_icc="avx512cd avx2 avx sse2", - x86_iccw="avx512cd avx2 avx sse2", - ppc64="vsx3 vsx2 vsx", - ppc64le="vsx3 vsx2", - armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon", - aarch64="asimdfhm asimddp asimdhp asimd" - ) - - def test_targets_policies(self): - # 'keep_baseline', generate objects for baseline features - self.expect_targets( - """ - /*@targets - $keep_baseline - sse2 sse42 avx2 avx512f - vsx2 vsx3 - neon neon_vfpv4 asimd asimddp - vx vxe vxe2 - */ - """, - baseline="sse41 avx2 vsx2 asimd vsx3 vxe", - x86="avx512f avx2 sse42 sse2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd", - s390x="vxe2 vxe vx" - ) - # 'keep_sort', leave the sort as-is - self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort - avx512f sse42 avx2 sse2 - vsx2 vsx3 - asimd neon neon_vfpv4 asimddp - vxe vxe2 - */ - """, - x86="avx512f sse42 avx2 sse2", - ppc64="vsx2 vsx3", - 
armhf="asimd neon neon_vfpv4 asimddp", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimd asimddp", - s390x="vxe vxe2" - ) - # 'autovec', skipping features that can't be - # vectorized by the compiler - self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort $autovec - avx512f avx2 sse42 sse41 sse2 - vsx3 vsx2 - asimddp asimd neon_vfpv4 neon - */ - """, - x86_gcc="avx512f avx2 sse42 sse41 sse2", - x86_icc="avx512f avx2 sse42 sse41 sse2", - x86_iccw="avx512f avx2 sse42 sse41 sse2", - x86_msvc="avx512f avx2 sse2" - if self.march() == 'x86' else "avx512f avx2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd" - ) - for policy in ("$maxopt", "$autovec"): - # 'maxopt' and autovec set the max acceptable optimization flags - self.expect_target_flags( - "/*@targets baseline %s */" % policy, - gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"}, - iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"}, - unknown={"baseline":".*"} - ) - - # 'werror', force compilers to treat warnings as errors - self.expect_target_flags( - "/*@targets baseline $werror */", - gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"}, - iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"}, - unknown={"baseline":".*"} - ) - - def test_targets_groups(self): - self.expect_targets( - """ - /*@targets $keep_baseline baseline #test_group */ - """, - groups=dict( - test_group=(""" - $keep_baseline - asimddp sse2 vsx2 avx2 vsx3 - avx512f asimdhp - """) - ), - x86="avx512f avx2 sse2 baseline", - ppc64="vsx3 vsx2 baseline", - armhf="asimddp asimdhp baseline" - ) - # test skip duplicating and sorting - self.expect_targets( - """ - /*@targets - * sse42 avx avx512f - * #test_group_1 - * vsx2 - * #test_group_2 - * asimddp asimdfhm - */ - """, - groups=dict( - test_group_1=(""" - VSX2 vsx3 asimd avx2 SSE41 - """), - test_group_2=(""" - vsx2 vsx3 asImd aVx2 sse41 - """) - ), - x86="avx512f avx2 avx sse42 sse41", - ppc64="vsx3 vsx2", - # vsx2 part of the default baseline of ppc64le, option ("min") - ppc64le="vsx3", - armhf="asimdfhm asimddp asimd", - # asimd part of the default baseline of aarch64, option ("min") - aarch64="asimdfhm asimddp" - ) - - def test_targets_multi(self): - self.expect_targets( - """ - /*@targets - (avx512_clx avx512_cnl) (asimdhp asimddp) - */ - """, - x86=r"\(avx512_clx avx512_cnl\)", - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and auto-sort - self.expect_targets( - """ - /*@targets - f16c (sse41 avx sse42) (sse3 avx2 avx512f) - vsx2 (vsx vsx3 vsx2) - (neon neon_vfpv4 asimd asimdhp asimddp) - */ - """, - x86="avx512f f16c avx", - ppc64="vsx3 vsx2", - ppc64le="vsx3", # vsx2 part of baseline - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and keep sort - self.expect_targets( - """ - /*@targets $keep_sort - (sse41 avx sse42) (sse3 avx2 avx512f) - (vsx vsx3 vsx2) - (asimddp neon neon_vfpv4 asimd asimdhp) - (vx vxe vxe2) - */ - """, - x86="avx avx512f", - ppc64="vsx3", - armhf=r"\(asimdhp asimddp\)", - s390x="vxe2" - ) - # test compiler variety and avoiding duplicating - self.expect_targets( - """ - /*@targets $keep_sort - fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3 - */ - """, - x86_gcc=r"fma3 avx2 \(fma3 avx2\)", - x86_icc="avx2", x86_iccw="avx2", - x86_msvc="avx2" - ) - -def new_test(arch, cc): - if is_standalone: return textwrap.dedent("""\ - class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase): - arch = '{arch}' 
- cc = '{cc}' - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self.setup_class() - """).format( - class_name=arch + '_' + cc, arch=arch, cc=cc - ) - return textwrap.dedent("""\ - class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt): - arch = '{arch}' - cc = '{cc}' - """).format( - class_name=arch + '_' + cc, arch=arch, cc=cc - ) -""" -if 1 and is_standalone: - FakeCCompilerOpt.fake_info = "x86_icc" - cco = FakeCCompilerOpt(None, cpu_baseline="avx2") - print(' '.join(cco.cpu_baseline_names())) - print(cco.cpu_baseline_flags()) - unittest.main() - sys.exit() -""" -for arch, compilers in arch_compilers.items(): - for cc in compilers: - exec(new_test(arch, cc)) - -if is_standalone: - unittest.main() diff --git a/numpy/distutils/tests/test_ccompiler_opt_conf.py b/numpy/distutils/tests/test_ccompiler_opt_conf.py deleted file mode 100644 index d9e8b2b0a834..000000000000 --- a/numpy/distutils/tests/test_ccompiler_opt_conf.py +++ /dev/null @@ -1,176 +0,0 @@ -import unittest -from os import sys, path - -is_standalone = __name__ == '__main__' and __package__ is None -if is_standalone: - sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) - from ccompiler_opt import CCompilerOpt -else: - from numpy.distutils.ccompiler_opt import CCompilerOpt - -arch_compilers = dict( - x86 = ("gcc", "clang", "icc", "iccw", "msvc"), - x64 = ("gcc", "clang", "icc", "iccw", "msvc"), - ppc64 = ("gcc", "clang"), - ppc64le = ("gcc", "clang"), - armhf = ("gcc", "clang"), - aarch64 = ("gcc", "clang"), - narch = ("gcc",) -) - -class FakeCCompilerOpt(CCompilerOpt): - fake_info = ("arch", "compiler", "extra_args") - def __init__(self, *args, **kwargs): - CCompilerOpt.__init__(self, None, **kwargs) - def dist_compile(self, sources, flags, **kwargs): - return sources - def dist_info(self): - return FakeCCompilerOpt.fake_info - @staticmethod - def dist_log(*args, stderr=False): - pass - -class _TestConfFeatures(FakeCCompilerOpt): - """A hook to check the sanity of configured features -- before it called by the abstract class '_Feature' - """ - - def conf_features_partial(self): - conf_all = self.conf_features - for feature_name, feature in conf_all.items(): - self.test_feature( - "attribute conf_features", - conf_all, feature_name, feature - ) - - conf_partial = FakeCCompilerOpt.conf_features_partial(self) - for feature_name, feature in conf_partial.items(): - self.test_feature( - "conf_features_partial()", - conf_partial, feature_name, feature - ) - return conf_partial - - def test_feature(self, log, search_in, feature_name, feature_dict): - error_msg = ( - "during validate '{}' within feature '{}', " - "march '{}' and compiler '{}'\n>> " - ).format(log, feature_name, self.cc_march, self.cc_name) - - if not feature_name.isupper(): - raise AssertionError(error_msg + "feature name must be in uppercase") - - for option, val in feature_dict.items(): - self.test_option_types(error_msg, option, val) - self.test_duplicates(error_msg, option, val) - - self.test_implies(error_msg, search_in, feature_name, feature_dict) - self.test_group(error_msg, search_in, feature_name, feature_dict) - self.test_extra_checks(error_msg, search_in, feature_name, feature_dict) - - def test_option_types(self, error_msg, option, val): - for tp, available in ( - ((str, list), ( - "implies", "headers", "flags", "group", "detect", "extra_checks" - )), - ((str,), ("disable",)), - ((int,), ("interest",)), - ((bool,), ("implies_detect",)), - ((bool, type(None)), ("autovec",)), - ) : - found_it = 
option in available - if not found_it: - continue - if not isinstance(val, tp): - error_tp = [t.__name__ for t in (*tp,)] - error_tp = ' or '.join(error_tp) - raise AssertionError(error_msg + - "expected '%s' type for option '%s' not '%s'" % ( - error_tp, option, type(val).__name__ - )) - break - - if not found_it: - raise AssertionError(error_msg + "invalid option name '%s'" % option) - - def test_duplicates(self, error_msg, option, val): - if option not in ( - "implies", "headers", "flags", "group", "detect", "extra_checks" - ) : return - - if isinstance(val, str): - val = val.split() - - if len(val) != len(set(val)): - raise AssertionError(error_msg + "duplicated values in option '%s'" % option) - - def test_implies(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - implies = feature_dict.get("implies", "") - if not implies: - return - if isinstance(implies, str): - implies = implies.split() - - if feature_name in implies: - raise AssertionError(error_msg + "feature implies itself") - - for impl in implies: - impl_dict = search_in.get(impl) - if impl_dict is not None: - if "disable" in impl_dict: - raise AssertionError(error_msg + "implies disabled feature '%s'" % impl) - continue - raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl) - - def test_group(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - group = feature_dict.get("group", "") - if not group: - return - if isinstance(group, str): - group = group.split() - - for f in group: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'group', '%s' already exists as a feature name" % f - ) - - def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - extra_checks = feature_dict.get("extra_checks", "") - if not extra_checks: - return - if isinstance(extra_checks, str): - extra_checks = extra_checks.split() - - for f in extra_checks: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f - ) - -class TestConfFeatures(unittest.TestCase): - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self._setup() - - def _setup(self): - FakeCCompilerOpt.conf_nocache = True - - def test_features(self): - for arch, compilers in arch_compilers.items(): - for cc in compilers: - FakeCCompilerOpt.fake_info = (arch, cc, "") - _TestConfFeatures() - -if is_standalone: - unittest.main() diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py deleted file mode 100644 index d1a20056a5a2..000000000000 --- a/numpy/distutils/tests/test_exec_command.py +++ /dev/null @@ -1,217 +0,0 @@ -import os -import pytest -import sys -from tempfile import TemporaryFile - -from numpy.distutils import exec_command -from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, assert_warns, IS_WASM - - -# In python 3 stdout, stderr are text (unicode compliant) devices, so to -# emulate them import StringIO from the io module. 
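# Editorial aside: the redirect classes below hand-roll what the standard
# library now provides; contextlib.redirect_stdout is the modern equivalent,
# except that it does not close the replacement stream on exit:
#
#     import io
#     from contextlib import redirect_stdout
#     buf = io.StringIO()
#     with redirect_stdout(buf):
#         print("captured")
#     assert buf.getvalue() == "captured\n"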
-from io import StringIO - -class redirect_stdout: - """Context manager to redirect stdout for exec_command test.""" - def __init__(self, stdout=None): - self._stdout = stdout or sys.stdout - - def __enter__(self): - self.old_stdout = sys.stdout - sys.stdout = self._stdout - - def __exit__(self, exc_type, exc_value, traceback): - self._stdout.flush() - sys.stdout = self.old_stdout - # note: closing sys.stdout won't close it. - self._stdout.close() - -class redirect_stderr: - """Context manager to redirect stderr for exec_command test.""" - def __init__(self, stderr=None): - self._stderr = stderr or sys.stderr - - def __enter__(self): - self.old_stderr = sys.stderr - sys.stderr = self._stderr - - def __exit__(self, exc_type, exc_value, traceback): - self._stderr.flush() - sys.stderr = self.old_stderr - # note: closing sys.stderr won't close it. - self._stderr.close() - -class emulate_nonposix: - """Context manager to emulate os.name != 'posix' """ - def __init__(self, osname='non-posix'): - self._new_name = osname - - def __enter__(self): - self._old_name = os.name - os.name = self._new_name - - def __exit__(self, exc_type, exc_value, traceback): - os.name = self._old_name - - -def test_exec_command_stdout(): - # Regression test for gh-2999 and gh-2915. - # There are several packages (nose, scipy.weave.inline, Sage inline - # Fortran) that replace stdout, in which case it doesn't have a fileno - # method. This is tested here, with a do-nothing command that fails if the - # presence of fileno() is assumed in exec_command. - - # The code has a special case for posix systems, so if we are on posix test - # both that the special case works and that the generic code works. - - # Test posix version: - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - -def test_exec_command_stderr(): - # Test posix version: - with redirect_stdout(TemporaryFile(mode='w+')): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(TemporaryFile()): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -class TestExecCommand: - def setup_method(self): - self.pyexe = get_pythonexe() - - def check_nt(self, **kws): - s, o = exec_command.exec_command('cmd /C echo path=%path%') - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) - assert_(s == 0) - assert_(o == 'win32') - - def check_posix(self, **kws): - s, o = exec_command.exec_command("echo Hello", **kws) - assert_(s == 0) - assert_(o == 'Hello') - - s, o = exec_command.exec_command('echo $AAA', **kws) - assert_(s == 0) - assert_(o == '') - - s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) - assert_(s == 0) - assert_(o == 'Tere') - - s, o = exec_command.exec_command('echo "$AAA"', **kws) - assert_(s == 0) - assert_(o == '') - - if 'BBB' not in os.environ: - os.environ['BBB'] = 'Hi' - s, o = 
exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) - assert_(s == 0) - assert_(o == 'Hey') - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - del os.environ['BBB'] - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == '') - - - s, o = exec_command.exec_command('this_is_not_a_command', **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command('echo path=$PATH', **kws) - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'posix') - - def check_basic(self, *kws): - s, o = exec_command.exec_command( - '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(\'0\');' - 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == '012') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) - assert_(s == 15) - assert_(o == '') - - s, o = exec_command.exec_command( - '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'Heipa') - - def check_execute_in(self, **kws): - with tempdir() as tmpdir: - fn = "file" - tmpfile = os.path.join(tmpdir, fn) - with open(tmpfile, 'w') as f: - f.write('Hello') - - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % - (self.pyexe, fn), **kws) - assert_(s != 0) - assert_(o != '') - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' - 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) - assert_(s == 0) - assert_(o == 'Hello') - - def test_basic(self): - with redirect_stdout(StringIO()): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - if os.name == "posix": - self.check_posix(use_tee=0) - self.check_posix(use_tee=1) - elif os.name == "nt": - self.check_nt(use_tee=0) - self.check_nt(use_tee=1) - self.check_execute_in(use_tee=0) - self.check_execute_in(use_tee=1) diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py deleted file mode 100644 index dd97f1e72afc..000000000000 --- a/numpy/distutils/tests/test_fcompiler.py +++ /dev/null @@ -1,43 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -customizable_flags = [ - ('f77', 'F77FLAGS'), - ('f90', 'F90FLAGS'), - ('free', 'FREEFLAGS'), - ('arch', 'FARCH'), - ('debug', 'FDEBUG'), - ('flags', 'FFLAGS'), - ('linker_so', 'LDFLAGS'), -] - - -def test_fcompiler_flags(monkeypatch): - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') - flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - assert_(new_flags == [new_flag]) - - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - 
- monkeypatch.delenv(envvar) - if prev_flags is None: - assert_(new_flags == [new_flag]) - else: - assert_(new_flags == prev_flags + [new_flag]) - diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index 0817ae58c214..000000000000 --- a/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,55 +0,0 @@ -from numpy.testing import assert_ - -import numpy.distutils.fcompiler - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), - ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), - ('4.8.0', '4.8.0'), - ('4.0.3-7', '4.0.3'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", - '4.9.1'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" - "gfortran: warning: yet another warning\n4.9.1", - '4.9.1'), - ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') -] - -class TestG77Versions: - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) - -class TestGFortranVersions: - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_gfortran(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py deleted file mode 100644 index 45c9cdac1910..000000000000 --- a/numpy/distutils/tests/test_fcompiler_intel.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy.distutils.fcompiler -from numpy.testing import assert_ - - -intel_32bit_version_strings = [ - ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" - "running on Intel(R) 32, Version 11.1", '11.1'), -] - -intel_64bit_version_strings = [ - ("Intel(R) Fortran IA-64 Compiler Professional for applications" - "running on IA-64, Version 11.0", '11.0'), - ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" - "running on Intel(R) 64, Version 11.1", '11.1') -] - -class TestIntelFCompilerVersions: - def test_32bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') - for vs, version in intel_32bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -class TestIntelEM64TFCompilerVersions: - def test_64bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') - for vs, version in intel_64bit_version_strings: - v = 
fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_fcompiler_nagfor.py b/numpy/distutils/tests/test_fcompiler_nagfor.py deleted file mode 100644 index 2e04f5266dc1..000000000000 --- a/numpy/distutils/tests/test_fcompiler_nagfor.py +++ /dev/null @@ -1,22 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' - '6.2(Chiyoda) Build 6200', '6.2'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.1(Tozai) Build 6136', '6.1'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.0(Hibiya) Build 1021', '6.0'), - ('nagfor', 'NAG Fortran Compiler Release ' - '5.3.2(971)', '5.3.2'), - ('nag', 'NAGWare Fortran 95 compiler Release 5.1' - '(347,355-367,375,380-383,389,394,399,401-402,407,' - '431,435,437,446,459-460,463,472,494,496,503,508,' - '511,517,529,555,557,565)', '5.1')] - -class TestNagFCompilerVersions: - def test_version_match(self): - for comp, vs, version in nag_version_strings: - fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) - v = fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_from_template.py b/numpy/distutils/tests/test_from_template.py deleted file mode 100644 index 588175496299..000000000000 --- a/numpy/distutils/tests/test_from_template.py +++ /dev/null @@ -1,44 +0,0 @@ - -from numpy.distutils.from_template import process_str -from numpy.testing import assert_equal - - -pyf_src = """ -python module foo - <_rd=real,double precision> - interface - subroutine foosub(tol) - <_rd>, intent(in,out) :: tol - end subroutine foosub - end interface -end python module foo -""" - -expected_pyf = """ -python module foo - interface - subroutine sfoosub(tol) - real, intent(in,out) :: tol - end subroutine sfoosub - subroutine dfoosub(tol) - double precision, intent(in,out) :: tol - end subroutine dfoosub - end interface -end python module foo -""" - - -def normalize_whitespace(s): - """ - Remove leading and trailing whitespace, and convert internal - stretches of whitespace to a single space. - """ - return ' '.join(s.split()) - - -def test_from_template(): - """Regression test for gh-10712.""" - pyf = process_str(pyf_src) - normalized_pyf = normalize_whitespace(pyf) - normalized_expected_pyf = normalize_whitespace(expected_pyf) - assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/numpy/distutils/tests/test_log.py b/numpy/distutils/tests/test_log.py deleted file mode 100644 index 72fddf37370f..000000000000 --- a/numpy/distutils/tests/test_log.py +++ /dev/null @@ -1,34 +0,0 @@ -import io -import re -from contextlib import redirect_stdout - -import pytest - -from numpy.distutils import log - - -def setup_module(): - f = io.StringIO() # changing verbosity also logs here, capture that - with redirect_stdout(f): - log.set_verbosity(2, force=True) # i.e. 
DEBUG - - -def teardown_module(): - log.set_verbosity(0, force=True) # the default - - -r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - - -@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"]) -def test_log_prefix(func_name): - func = getattr(log, func_name) - msg = f"{func_name} message" - f = io.StringIO() - with redirect_stdout(f): - func(msg) - out = f.getvalue() - assert out # sanity check - clean_out = r_ansi.sub("", out) - line = next(line for line in clean_out.splitlines()) - assert line == f"{func_name.upper()}: {msg}" diff --git a/numpy/distutils/tests/test_mingw32ccompiler.py b/numpy/distutils/tests/test_mingw32ccompiler.py deleted file mode 100644 index c4eac7b72de1..000000000000 --- a/numpy/distutils/tests/test_mingw32ccompiler.py +++ /dev/null @@ -1,47 +0,0 @@ -import shutil -import subprocess -import sys -import pytest -import os -import sysconfig - -from numpy.distutils import mingw32ccompiler - - -@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') -@pytest.mark.skipif(not os.path.exists(os.path.join(sys.prefix, 'libs')), - reason="test requires mingw library layout") -@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='mingw GNU objdump does not understand arm64 binary format yet') -def test_build_import(): - '''Test the mingw32ccompiler.build_import_library, which builds a - `python.a` from the MSVC `python.lib` - ''' - - # make sure `nm.exe` exists and supports the current python version. This - # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit - try: - out = subprocess.check_output(['nm.exe', '--help']) - except FileNotFoundError: - pytest.skip("'nm.exe' not on path, is mingw installed?") - supported = out[out.find(b'supported targets:'):] - if sys.maxsize < 2**32: - if b'pe-i386' not in supported: - raise ValueError("'nm.exe' found but it does not support 32-bit " - "dlls when using 32-bit python. Supported " - "formats: '%s'" % supported) - elif b'pe-x86-64' not in supported: - raise ValueError("'nm.exe' found but it does not support 64-bit " - "dlls when using 64-bit python. 
Supported " - "formats: '%s'" % supported) - # Hide the import library to force a build - has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() - if has_import_lib: - shutil.move(fullpath, fullpath + '.bak') - - try: - # Whew, now we can actually test the function - mingw32ccompiler.build_import_library() - - finally: - if has_import_lib: - shutil.move(fullpath + '.bak', fullpath) diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index 40e7606eeb76..000000000000 --- a/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,88 +0,0 @@ -from os.path import join, sep, dirname - -import pytest - -from numpy.distutils.misc_util import ( - appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info - ) -from numpy.testing import ( - assert_, assert_equal, IS_EDITABLE - ) - -ajoin = lambda *paths: join(*((sep,)+paths)) - -class TestAppendpath: - - def test_1(self): - assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) - assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) - assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) - assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) - - def test_2(self): - assert_equal(appendpath('prefix/sub', 'name'), - join('prefix', 'sub', 'name')) - assert_equal(appendpath('prefix/sub', 'sup/name'), - join('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub', '/prefix/name'), - ajoin('prefix', 'sub', 'name')) - - def test_3(self): - assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), - ajoin('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) - -class TestMinrelpath: - - def test_1(self): - n = lambda path: path.replace('/', sep) - assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) - assert_equal(minrelpath('..'), '..') - assert_equal(minrelpath(n('aa/..')), '') - assert_equal(minrelpath(n('aa/../bb')), 'bb') - assert_equal(minrelpath(n('aa/bb/..')), 'aa') - assert_equal(minrelpath(n('aa/bb/../..')), '') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) - assert_equal(minrelpath(n('.././..')), n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) - -class TestGpaths: - - def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__), '..')) - ls = gpaths('command/*.py', local_path) - assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) - f = gpaths('system_info.py', local_path) - assert_(join(local_path, 'system_info.py') == f[0], repr(f)) - -class TestSharedExtension: - - def test_get_shared_lib_extension(self): - import sys - ext = get_shared_lib_extension(is_python_ext=False) - if sys.platform.startswith('linux'): - assert_equal(ext, '.so') - elif sys.platform.startswith('gnukfreebsd'): - assert_equal(ext, '.so') - elif sys.platform.startswith('darwin'): - assert_equal(ext, '.dylib') - elif sys.platform.startswith('win'): - assert_equal(ext, '.dll') - # just check for no crash - assert_(get_shared_lib_extension(is_python_ext=True)) - - -@pytest.mark.skipif( - IS_EDITABLE, - reason="`get_info` .ini lookup method incompatible with editable install" -) -def test_installed_npymath_ini(): - # Regression test for gh-7707. If npymath.ini wasn't installed, then this - # will give an error. 
- info = get_info('npymath') - - assert isinstance(info, dict) - assert "define_macros" in info diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py deleted file mode 100644 index b287ebe2e832..000000000000 --- a/numpy/distutils/tests/test_npy_pkg_config.py +++ /dev/null @@ -1,84 +0,0 @@ -import os - -from numpy.distutils.npy_pkg_config import read_config, parse_flags -from numpy.testing import temppath, assert_ - -simple = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[default] -cflags = -I/usr/include -libs = -L/usr/lib -""" -simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', - 'version': '0.1', 'name': 'foo'} - -simple_variable = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[variables] -prefix = /foo/bar -libdir = ${prefix}/lib -includedir = ${prefix}/include - -[default] -cflags = -I${includedir} -libs = -L${libdir} -""" -simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', - 'version': '0.1', 'name': 'foo'} - -class TestLibraryInfo: - def test_simple(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_d['cflags']) - assert_(out.libs() == simple_d['libflags']) - assert_(out.name == simple_d['name']) - assert_(out.version == simple_d['version']) - - def test_simple_variable(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple_variable) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_variable_d['cflags']) - assert_(out.libs() == simple_variable_d['libflags']) - assert_(out.name == simple_variable_d['name']) - assert_(out.version == simple_variable_d['version']) - out.vars['prefix'] = '/Users/david' - assert_(out.cflags() == '-I/Users/david/include') - -class TestParseFlags: - def test_simple_cflags(self): - d = parse_flags("-I/usr/include") - assert_(d['include_dirs'] == ['/usr/include']) - - d = parse_flags("-I/usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - d = parse_flags("-I /usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - def test_simple_lflags(self): - d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) - - d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) diff --git a/numpy/distutils/tests/test_shell_utils.py b/numpy/distutils/tests/test_shell_utils.py deleted file mode 100644 index 696d38ddd66a..000000000000 --- a/numpy/distutils/tests/test_shell_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -import pytest -import subprocess -import json -import sys - -from numpy.distutils import _shell_utils -from numpy.testing import IS_WASM - -argv_cases = [ - [r'exe'], - [r'path/exe'], - [r'path\exe'], - [r'\\server\path\exe'], - [r'path to/exe'], - [r'path to\exe'], - - [r'exe', '--flag'], - [r'path/exe', '--flag'], - [r'path\exe', '--flag'], - [r'path to/exe', '--flag'], - [r'path to\exe', '--flag'], - - # flags containing literal quotes in their name - [r'path to/exe', '--flag-"quoted"'], - [r'path to\exe', '--flag-"quoted"'], - [r'path to/exe', '"--flag-quoted"'], - [r'path to\exe', '"--flag-quoted"'], -] - - 
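The deleted test_npy_pkg_config.py above exercises the `${variable}` interpolation that numpy.distutils performed on its `.ini` metadata files. As a record of the behaviour being removed, here is a minimal stdlib-only sketch; `expand` is a hypothetical stand-in, not the `numpy.distutils.npy_pkg_config` implementation:

    # Hedged sketch of the ${var} interpolation checked by the deleted test;
    # `expand` is hypothetical, not the npy_pkg_config implementation.
    import re

    def expand(value, variables):
        # Recursively substitute ${name} references from `variables`.
        return re.sub(r"\$\{(\w+)\}",
                      lambda m: expand(variables[m.group(1)], variables),
                      value)

    variables = {"prefix": "/foo/bar", "libdir": "${prefix}/lib"}
    assert expand("-L${libdir}", variables) == "-L/foo/bar/lib"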
-@pytest.fixture(params=[ - _shell_utils.WindowsParser, - _shell_utils.PosixParser -]) -def Parser(request): - return request.param - - -@pytest.fixture -def runner(Parser): - if Parser != _shell_utils.NativeParser: - pytest.skip('Unable to run with non-native parser') - - if Parser == _shell_utils.WindowsParser: - return lambda cmd: subprocess.check_output(cmd) - elif Parser == _shell_utils.PosixParser: - # posix has no non-shell string parsing - return lambda cmd: subprocess.check_output(cmd, shell=True) - else: - raise NotImplementedError - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_join_matches_subprocess(Parser, runner, argv): - """ - Test that join produces strings understood by subprocess - """ - # invoke python to return its arguments as json - cmd = [ - sys.executable, '-c', - 'import json, sys; print(json.dumps(sys.argv[1:]))' - ] - joined = Parser.join(cmd + argv) - json_out = runner(joined).decode() - assert json.loads(json_out) == argv - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_roundtrip(Parser, argv): - """ - Test that split is the inverse operation of join - """ - try: - joined = Parser.join(argv) - assert argv == Parser.split(joined) - except NotImplementedError: - pytest.skip("Not implemented") diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py deleted file mode 100644 index 5887abea76bd..000000000000 --- a/numpy/distutils/tests/test_system_info.py +++ /dev/null @@ -1,334 +0,0 @@ -import os -import shutil -import pytest -from tempfile import mkstemp, mkdtemp -from subprocess import Popen, PIPE -import importlib.metadata -from distutils.errors import DistutilsError - -from numpy.testing import assert_, assert_equal, assert_raises -from numpy.distutils import ccompiler, customized_ccompiler -from numpy.distutils.system_info import system_info, ConfigParser, mkl_info -from numpy.distutils.system_info import AliasedOptionError -from numpy.distutils.system_info import default_lib_dirs, default_include_dirs -from numpy.distutils import _shell_utils - - -try: - if importlib.metadata.version('setuptools') >= '60': - # pkg-resources gives deprecation warnings, and there may be more - # issues. 
We only support setuptools <60
-        pytest.skip("setuptools is too new", allow_module_level=True)
-except importlib.metadata.PackageNotFoundError:
-    # we don't require `setuptools`; if it is not found, continue
-    pass
-
-
-def get_class(name, notfound_action=1):
-    """
-    notfound_action:
-    0 - do nothing
-    1 - display warning message
-    2 - raise error
-    """
-    cl = {'temp1': Temp1Info,
-          'temp2': Temp2Info,
-          'duplicate_options': DuplicateOptionInfo,
-          }.get(name.lower(), _system_info)
-    return cl()
-
-simple_site = """
-[ALL]
-library_dirs = {dir1:s}{pathsep:s}{dir2:s}
-libraries = {lib1:s},{lib2:s}
-extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os
-runtime_library_dirs = {dir1:s}
-
-[temp1]
-library_dirs = {dir1:s}
-libraries = {lib1:s}
-runtime_library_dirs = {dir1:s}
-
-[temp2]
-library_dirs = {dir2:s}
-libraries = {lib2:s}
-extra_link_args = -Wl,-rpath={lib2_escaped:s}
-rpath = {dir2:s}
-
-[duplicate_options]
-mylib_libs = {lib1:s}
-libraries = {lib2:s}
-"""
-site_cfg = simple_site
-
-fakelib_c_text = """
-/* This file is generated from numpy/distutils/testing/test_system_info.py */
-#include <stdio.h>
-void foo(void) {
-    printf("Hello foo");
-}
-void bar(void) {
-    printf("Hello bar");
-}
-"""
-
-def have_compiler():
-    """ Return True if there appears to be an executable compiler
-    """
-    compiler = customized_ccompiler()
-    try:
-        cmd = compiler.compiler  # Unix compilers
-    except AttributeError:
-        try:
-            if not compiler.initialized:
-                compiler.initialize()  # MSVC is different
-        except (DistutilsError, ValueError):
-            return False
-        cmd = [compiler.cc]
-    try:
-        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
-        p.stdout.close()
-        p.stderr.close()
-        p.wait()
-    except OSError:
-        return False
-    return True
-
-
-HAVE_COMPILER = have_compiler()
-
-
-class _system_info(system_info):
-
-    def __init__(self,
-                 default_lib_dirs=default_lib_dirs,
-                 default_include_dirs=default_include_dirs,
-                 verbosity=1,
-                 ):
-        self.__class__.info = {}
-        self.local_prefixes = []
-        defaults = {'library_dirs': '',
-                    'include_dirs': '',
-                    'runtime_library_dirs': '',
-                    'rpath': '',
-                    'src_dirs': '',
-                    'search_static_first': "0",
-                    'extra_compile_args': '',
-                    'extra_link_args': ''}
-        self.cp = ConfigParser(defaults)
-        # We have to parse the config files afterwards
-        # to have a consistent temporary filepath
-
-    def _check_libs(self, lib_dirs, libs, opt_libs, exts):
-        """Override _check_libs to return with all dirs """
-        info = {'libraries': libs, 'library_dirs': lib_dirs}
-        return info
-
-
-class Temp1Info(_system_info):
-    """For testing purposes"""
-    section = 'temp1'
-
-
-class Temp2Info(_system_info):
-    """For testing purposes"""
-    section = 'temp2'
-
-class DuplicateOptionInfo(_system_info):
-    """For testing purposes"""
-    section = 'duplicate_options'
-
-
-class TestSystemInfoReading:
-
-    def setup_method(self):
-        """ Create the libraries """
-        # Create 2 sources and 2 libraries
-        self._dir1 = mkdtemp()
-        self._src1 = os.path.join(self._dir1, 'foo.c')
-        self._lib1 = os.path.join(self._dir1, 'libfoo.so')
-        self._dir2 = mkdtemp()
-        self._src2 = os.path.join(self._dir2, 'bar.c')
-        self._lib2 = os.path.join(self._dir2, 'libbar.so')
-        # Update local site.cfg
-        global simple_site, site_cfg
-        site_cfg = simple_site.format(
-            dir1=self._dir1,
-            lib1=self._lib1,
-            dir2=self._dir2,
-            lib2=self._lib2,
-            pathsep=os.pathsep,
-            lib2_escaped=_shell_utils.NativeParser.join([self._lib2])
-        )
-        # Write site.cfg
-        fd, self._sitecfg = mkstemp()
-        os.close(fd)
-        with open(self._sitecfg, 'w') as fd:
-            fd.write(site_cfg)
-        # Write the sources
-        
with open(self._src1, 'w') as fd: - fd.write(fakelib_c_text) - with open(self._src2, 'w') as fd: - fd.write(fakelib_c_text) - # We create all class-instances - - def site_and_parse(c, site_cfg): - c.files = [site_cfg] - c.parse_config_files() - return c - self.c_default = site_and_parse(get_class('default'), self._sitecfg) - self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) - self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) - self.c_dup_options = site_and_parse(get_class('duplicate_options'), - self._sitecfg) - - def teardown_method(self): - # Do each removal separately - try: - shutil.rmtree(self._dir1) - except Exception: - pass - try: - shutil.rmtree(self._dir2) - except Exception: - pass - try: - os.remove(self._sitecfg) - except Exception: - pass - - def test_all(self): - # Read in all information in the ALL block - tsi = self.c_default - assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) - - def test_temp1(self): - # Read in all information in the temp1 block - tsi = self.c_temp1 - assert_equal(tsi.get_lib_dirs(), [self._dir1]) - assert_equal(tsi.get_libraries(), [self._lib1]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - - def test_temp2(self): - # Read in all information in the temp2 block - tsi = self.c_temp2 - assert_equal(tsi.get_lib_dirs(), [self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib2]) - # Now from rpath and not runtime_library_dirs - assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) - - def test_duplicate_options(self): - # Ensure that duplicates are raising an AliasedOptionError - tsi = self.c_dup_options - assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") - assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) - assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - def test_compile1(self): - # Compile source and link the first source - c = customized_ccompiler() - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir1) - c.compile([os.path.basename(self._src1)], output_dir=self._dir1) - # Ensure that the object exists - assert_(os.path.isfile(self._src1.replace('.c', '.o')) or - os.path.isfile(self._src1.replace('.c', '.obj'))) - finally: - os.chdir(previousDir) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), - reason="Fails with MSVC compiler ") - def test_compile2(self): - # Compile source and link the second source - tsi = self.c_temp2 - c = customized_ccompiler() - extra_link_args = tsi.calc_extra_info()['extra_link_args'] - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir2) - c.compile([os.path.basename(self._src2)], output_dir=self._dir2, - extra_postargs=extra_link_args) - # Ensure that the object exists - assert_(os.path.isfile(self._src2.replace('.c', '.o'))) - finally: - os.chdir(previousDir) - - HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", []) - - @pytest.mark.xfail(HAS_MKL, 
reason=("`[DEFAULT]` override doesn't work if " - "numpy is built with MKL support")) - def test_overrides(self): - previousDir = os.getcwd() - cfg = os.path.join(self._dir1, 'site.cfg') - shutil.copy(self._sitecfg, cfg) - try: - os.chdir(self._dir1) - # Check that the '[ALL]' section does not override - # missing values from other sections - info = mkl_info() - lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep) - assert info.get_lib_dirs() != lib_dirs - - # But if we copy the values to a '[mkl]' section the value - # is correct - with open(cfg) as fid: - mkl = fid.read().replace('[ALL]', '[mkl]', 1) - with open(cfg, 'w') as fid: - fid.write(mkl) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - - # Also, the values will be taken from a section named '[DEFAULT]' - with open(cfg) as fid: - dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1) - with open(cfg, 'w') as fid: - fid.write(dflt) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - finally: - os.chdir(previousDir) - - -def test_distutils_parse_env_order(monkeypatch): - from numpy.distutils.system_info import _parse_env_order - env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER' - - base_order = list('abcdef') - - monkeypatch.setenv(env, 'b,i,e,f') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 3 - assert order == list('bef') - assert len(unknown) == 1 - - # For when LAPACK/BLAS optimization is disabled - monkeypatch.setenv(env, '') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 0 - assert len(unknown) == 0 - - for prefix in '^!': - monkeypatch.setenv(env, f'{prefix}b,i,e') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 4 - assert order == list('acdf') - assert len(unknown) == 1 - - with pytest.raises(ValueError): - monkeypatch.setenv(env, 'b,^e,i') - _parse_env_order(base_order, env) - - with pytest.raises(ValueError): - monkeypatch.setenv(env, '!b,^e,i') - _parse_env_order(base_order, env) diff --git a/numpy/distutils/tests/utilities.py b/numpy/distutils/tests/utilities.py deleted file mode 100644 index 5016a83d2164..000000000000 --- a/numpy/distutils/tests/utilities.py +++ /dev/null @@ -1,90 +0,0 @@ -# Kanged out of numpy.f2py.tests.util for test_build_ext -from numpy.testing import IS_WASM -import textwrap -import shutil -import tempfile -import os -import re -import subprocess -import sys - -# -# Check if compilers are available at all... -# - -_compiler_status = None - - -def _get_compiler_status(): - global _compiler_status - if _compiler_status is not None: - return _compiler_status - - _compiler_status = (False, False, False) - if IS_WASM: - # Can't run compiler from inside WASM. - return _compiler_status - - # XXX: this is really ugly. But I don't know how to invoke Distutils - # in a safer way... 
- code = textwrap.dedent( - f"""\ - import os - import sys - sys.path = {repr(sys.path)} - - def configuration(parent_name='',top_path=None): - global config - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - return config - - from numpy.distutils.core import setup - setup(configuration=configuration) - - config_cmd = config.get_config_cmd() - have_c = config_cmd.try_compile('void foo() {{}}') - print('COMPILERS:%%d,%%d,%%d' %% (have_c, - config.have_f77c(), - config.have_f90c())) - sys.exit(99) - """ - ) - code = code % dict(syspath=repr(sys.path)) - - tmpdir = tempfile.mkdtemp() - try: - script = os.path.join(tmpdir, "setup.py") - - with open(script, "w") as f: - f.write(code) - - cmd = [sys.executable, "setup.py", "config"] - p = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmpdir - ) - out, err = p.communicate() - finally: - shutil.rmtree(tmpdir) - - m = re.search(rb"COMPILERS:(\d+),(\d+),(\d+)", out) - if m: - _compiler_status = ( - bool(int(m.group(1))), - bool(int(m.group(2))), - bool(int(m.group(3))), - ) - # Finished - return _compiler_status - - -def has_c_compiler(): - return _get_compiler_status()[0] - - -def has_f77_compiler(): - return _get_compiler_status()[1] - - -def has_f90_compiler(): - return _get_compiler_status()[2] diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py deleted file mode 100644 index 4884960fdf22..000000000000 --- a/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. - -""" -import os -import sys -import subprocess -import shlex - -from distutils.errors import CompileError, DistutilsExecError, LibError -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.ccompiler import replace_method -from numpy.distutils.misc_util import _commandline_dep_string -from numpy.distutils import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile a single source files with a Unix-style compiler.""" - # HP ad-hoc fix, see ticket 1383 - ccomp = self.compiler_so - if ccomp[0] == 'aCC': - # remove flags that will trigger ANSI-C mode for aCC - if '-Ae' in ccomp: - ccomp.remove('-Ae') - if '-Aa' in ccomp: - ccomp.remove('-Aa') - # add flags for (almost) sane C++ handling - ccomp += ['-AA'] - self.compiler_so = ccomp - # ensure OPT environment variable is read - if 'OPT' in os.environ: - # XXX who uses this? 
- from sysconfig import get_config_vars - opt = shlex.join(shlex.split(os.environ['OPT'])) - gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0])) - ccomp_s = shlex.join(self.compiler_so) - if opt not in ccomp_s: - ccomp_s = ccomp_s.replace(gcv_opt, opt) - self.compiler_so = shlex.split(ccomp_s) - llink_s = shlex.join(self.linker_so) - if opt not in llink_s: - self.linker_so = self.linker_so + shlex.split(opt) - - display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) - - # gcc style automatic dependencies, outputs a makefile (-MF) that lists - # all headers needed by a c file as a side effect of compilation (-MMD) - if getattr(self, '_auto_depends', False): - deps = ['-MMD', '-MF', obj + '.d'] - else: - deps = [] - - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + - extra_postargs, display = display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - # add commandline flags to dependency file - if deps: - # After running the compiler, the file created will be in EBCDIC - # but will not be tagged as such. This tags it so the file does not - # have multiple different encodings being written to it - if sys.platform == 'zos': - subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) - with open(obj + '.d', 'a') as f: - f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - """ - Build a static library in a separate sub-process. - - Parameters - ---------- - objects : list or tuple of str - List of paths to object files used to build the static library. - output_libname : str - The library name as an absolute or relative (if `output_dir` is used) - path. - output_dir : str, optional - The path to the output directory. Default is None, in which case - the ``output_dir`` attribute of the UnixCCompiler instance. - debug : bool, optional - This parameter is not used. - target_lang : str, optional - This parameter is not used. - - Returns - ------- - None - - """ - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. - # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except OSError: - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. 
- if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError as e: - msg = str(e) - raise LibError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py index 7324168e1dc8..f97e9ff3f80c 100644 --- a/numpy/doc/ufuncs.py +++ b/numpy/doc/ufuncs.py @@ -113,7 +113,7 @@ output argument is used, the ufunc still returns a reference to the result. >>> x = np.arange(2) - >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x, + >>> np.add(np.arange(2, dtype=np.float64), np.arange(2, dtype=np.float64), x, ... casting='unsafe') array([0, 2]) >>> x diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 007dc643c0e3..920e23c85f1f 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -2,68 +2,69 @@ from typing import ( Any, Generic, + Literal as L, LiteralString, Never, NoReturn, Self, - TypeAlias, final, overload, type_check_only, ) -from typing import Literal as L - from typing_extensions import TypeVar import numpy as np __all__ = [ # noqa: RUF022 - 'BoolDType', - 'Int8DType', - 'ByteDType', - 'UInt8DType', - 'UByteDType', - 'Int16DType', - 'ShortDType', - 'UInt16DType', - 'UShortDType', - 'Int32DType', - 'IntDType', - 'UInt32DType', - 'UIntDType', - 'Int64DType', - 'LongDType', - 'UInt64DType', - 'ULongDType', - 'LongLongDType', - 'ULongLongDType', - 'Float16DType', - 'Float32DType', - 'Float64DType', - 'LongDoubleDType', - 'Complex64DType', - 'Complex128DType', - 'CLongDoubleDType', - 'ObjectDType', - 'BytesDType', - 'StrDType', - 'VoidDType', - 'DateTime64DType', - 'TimeDelta64DType', - 'StringDType', + "BoolDType", + "Int8DType", + "ByteDType", + "UInt8DType", + "UByteDType", + "Int16DType", + "ShortDType", + "UInt16DType", + "UShortDType", + "Int32DType", + "IntDType", + "UInt32DType", + "UIntDType", + "Int64DType", + "LongDType", + "UInt64DType", + "ULongDType", + "LongLongDType", + "ULongLongDType", + "Float16DType", + "Float32DType", + "Float64DType", + "LongDoubleDType", + "Complex64DType", + "Complex128DType", + "CLongDoubleDType", + "ObjectDType", + "BytesDType", + "StrDType", + "VoidDType", + "DateTime64DType", + "TimeDelta64DType", + "StringDType", ] -# Helper base classes (typing-only) +# Type parameters -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_ItemSizeT_co = TypeVar("_ItemSizeT_co", bound=int, default=int, covariant=True) +_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) + +# Helper base classes (typing-only) @type_check_only -class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] +class _SimpleDType[ScalarT: np.generic](np.dtype[ScalarT]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] names: None # pyright: ignore[reportIncompatibleVariableOverride] def __new__(cls, /) -> Self: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @property - def base(self) -> np.dtype[_ScalarT_co]: ... + def base(self) -> np.dtype[ScalarT]: ... @property def fields(self) -> None: ... @property @@ -78,7 +79,7 @@ class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore def subdtype(self) -> None: ... 
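The numpy/dtypes.pyi rewrite in this hunk and the ones that follow swaps module-level `TypeVar` declarations for PEP 695 inline type parameters (Python 3.12+). Both spellings declare the same generic; a minimal standalone comparison, with made-up names and no NumPy involvement:

    # Made-up example contrasting the two generic spellings the stubs
    # migrate between; not NumPy code.
    from typing import TypeVar

    _T = TypeVar("_T")

    def first_old(items: list[_T]) -> _T:   # pre-PEP 695 spelling
        return items[0]

    def first_new[T](items: list[T]) -> T:  # PEP 695 spelling (3.12+)
        return items[0]

    assert first_old([1, 2]) == first_new([1, 2]) == 1

The same applies to classes, and variance (`covariant=True` on the old `TypeVar`s) is inferred automatically under PEP 695, which is why the `_co`-suffixed variables can simply disappear.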
@type_check_only -class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] +class _LiteralDType[ScalarT_co: np.generic](_SimpleDType[ScalarT_co]): # type: ignore[misc] @property def flags(self) -> L[0]: ... @property @@ -86,21 +87,17 @@ class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]): # type: i # Helper mixins (typing-only): -_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True) -_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True) -_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True) - @type_check_only -class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]): +class _TypeCodes[KindT: LiteralString, CharT: LiteralString, NumT: int]: @final @property - def kind(self) -> _KindT_co: ... + def kind(self) -> KindT: ... @final @property - def char(self) -> _CharT_co: ... + def char(self) -> CharT: ... @final @property - def num(self) -> _NumT_co: ... + def num(self) -> NumT: ... @type_check_only class _NoOrder: @@ -114,17 +111,14 @@ class _NativeOrder: @property def byteorder(self) -> L["="]: ... -_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True) -_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True, default=int) - @type_check_only -class _NBit(Generic[_DataSize_co, _ItemSize_co]): +class _NBit[AlignmentT: int, ItemSizeT: int]: @final @property - def alignment(self) -> _DataSize_co: ... + def alignment(self) -> AlignmentT: ... @final @property - def itemsize(self) -> _ItemSize_co: ... + def itemsize(self) -> ItemSizeT: ... @type_check_only class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ... @@ -239,7 +233,7 @@ class UInt64DType( # type: ignore[misc] def str(self) -> L["u8"]: ... # Standard C-named version/alias: -# NOTE: Don't make these `Final`: it will break stubtest +# NOTE: Don't make these `Final[_]` or a `type _` it will break stubtest ByteDType = Int8DType UByteDType = UInt8DType ShortDType = Int16DType @@ -427,11 +421,11 @@ class ObjectDType( # type: ignore[misc] class BytesDType( # type: ignore[misc] _TypeCodes[L["S"], L["S"], L[18]], _NoOrder, - _NBit[L[1], _ItemSize_co], + _NBit[L[1], _ItemSizeT_co], _SimpleDType[np.bytes_], - Generic[_ItemSize_co], + Generic[_ItemSizeT_co], ): - def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ... + def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> BytesDType[ItemSizeT]: ... @property def hasobject(self) -> L[False]: ... @property @@ -443,11 +437,11 @@ class BytesDType( # type: ignore[misc] class StrDType( # type: ignore[misc] _TypeCodes[L["U"], L["U"], L[19]], _NativeOrder, - _NBit[L[4], _ItemSize_co], + _NBit[L[4], _ItemSizeT_co], _SimpleDType[np.str_], - Generic[_ItemSize_co], + Generic[_ItemSizeT_co], ): - def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ... + def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> StrDType[ItemSizeT]: ... @property def hasobject(self) -> L[False]: ... @property @@ -459,12 +453,12 @@ class StrDType( # type: ignore[misc] class VoidDType( # type: ignore[misc] _TypeCodes[L["V"], L["V"], L[20]], _NoOrder, - _NBit[L[1], _ItemSize_co], + _NBit[L[1], _ItemSizeT_co], np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] - Generic[_ItemSize_co], + Generic[_ItemSizeT_co], ): # NOTE: `VoidDType(...)` raises a `TypeError` at the moment - def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... + def __new__(cls, length: _ItemSizeT_co, /) -> NoReturn: ... @property def base(self) -> Self: ... 
@property @@ -484,9 +478,9 @@ class VoidDType( # type: ignore[misc] # Other: -_DateUnit: TypeAlias = L["Y", "M", "W", "D"] -_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] -_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit +type _DateUnit = L["Y", "M", "W", "D"] +type _TimeUnit = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] +type _DateTimeUnit = _DateUnit | _TimeUnit @final class DateTime64DType( # type: ignore[misc] @@ -578,8 +572,6 @@ class TimeDelta64DType( # type: ignore[misc] "m8[as]", ]: ... -_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) - @final class StringDType( # type: ignore[misc] _TypeCodes[L["T"], L["T"], L[2056]], diff --git a/numpy/exceptions.py b/numpy/exceptions.py index 0e8688ae9eba..cf70b4a4ce3b 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -7,8 +7,7 @@ .. versionadded:: NumPy 1.25 - The exceptions module is new in NumPy 1.25. Older exceptions remain - available through the main NumPy namespace for compatibility. + The exceptions module is new in NumPy 1.25. .. currentmodule:: numpy.exceptions diff --git a/numpy/exceptions.pyi b/numpy/exceptions.pyi index 9ed50927d070..4cc4eff5d321 100644 --- a/numpy/exceptions.pyi +++ b/numpy/exceptions.pyi @@ -17,9 +17,11 @@ class TooHardError(RuntimeError): ... class DTypePromotionError(TypeError): ... class AxisError(ValueError, IndexError): + __slots__ = "_msg", "axis", "ndim" + axis: int | None ndim: int | None @overload - def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ... + def __init__(self, axis: str, ndim: None = None, msg_prefix: None = None) -> None: ... @overload - def __init__(self, axis: int, ndim: int, msg_prefix: str | None = ...) -> None: ... + def __init__(self, axis: int, ndim: int, msg_prefix: str | None = None) -> None: ... diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index e34dd99aec1c..f545c9c5fd84 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -26,12 +26,6 @@ def get_include(): """ Return the directory that contains the ``fortranobject.c`` and ``.h`` files. - .. note:: - - This function is not needed when building an extension with - `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files - in one go. - Python extension modules built with f2py-generated code need to use ``fortranobject.c`` as a source file, and include the ``fortranobject.h`` header. 
This function can be used to obtain the directory containing diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi index d12f47e80a7d..aa7d5918f7d2 100644 --- a/numpy/f2py/__init__.pyi +++ b/numpy/f2py/__init__.pyi @@ -1,5 +1,4 @@ -from .f2py2e import main as main -from .f2py2e import run_main +from .f2py2e import main as main, run_main __all__ = ["get_include", "run_main"] diff --git a/numpy/f2py/_backends/__init__.py b/numpy/f2py/_backends/__init__.py index e91393c14be3..beb2bab2384d 100644 --- a/numpy/f2py/_backends/__init__.py +++ b/numpy/f2py/_backends/__init__.py @@ -2,8 +2,5 @@ def f2py_build_generator(name): if name == "meson": from ._meson import MesonBackend return MesonBackend - elif name == "distutils": - from ._distutils import DistutilsBackend - return DistutilsBackend else: raise ValueError(f"Unknown backend: {name}") diff --git a/numpy/f2py/_backends/__init__.pyi b/numpy/f2py/_backends/__init__.pyi index 43625c68061f..11e3743be541 100644 --- a/numpy/f2py/_backends/__init__.pyi +++ b/numpy/f2py/_backends/__init__.pyi @@ -2,4 +2,4 @@ from typing import Literal as L from ._backend import Backend -def f2py_build_generator(name: L["distutils", "meson"]) -> Backend: ... +def f2py_build_generator(name: L["meson"]) -> Backend: ... diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py deleted file mode 100644 index 5c8f1092b568..000000000000 --- a/numpy/f2py/_backends/_distutils.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -import shutil -import sys -import warnings - -from numpy.distutils.core import Extension, setup -from numpy.distutils.misc_util import dict_append -from numpy.distutils.system_info import get_info -from numpy.exceptions import VisibleDeprecationWarning - -from ._backend import Backend - - -class DistutilsBackend(Backend): - def __init__(sef, *args, **kwargs): - warnings.warn( - "\ndistutils has been deprecated since NumPy 1.26.x\n" - "Use the Meson backend instead, or generate wrappers" - " without -c and use a custom build script", - VisibleDeprecationWarning, - stacklevel=2, - ) - super().__init__(*args, **kwargs) - - def compile(self): - num_info = {} - if num_info: - self.include_dirs.extend(num_info.get("include_dirs", [])) - ext_args = { - "name": self.modulename, - "sources": self.sources, - "include_dirs": self.include_dirs, - "library_dirs": self.library_dirs, - "libraries": self.libraries, - "define_macros": self.define_macros, - "undef_macros": self.undef_macros, - "extra_objects": self.extra_objects, - "f2py_options": self.f2py_flags, - } - - if self.sysinfo_flags: - for n in self.sysinfo_flags: - i = get_info(n) - if not i: - print( - f"No {n!r} resources found" - "in system (try `f2py --help-link`)" - ) - dict_append(ext_args, **i) - - ext = Extension(**ext_args) - - sys.argv = [sys.argv[0]] + self.setup_flags - sys.argv.extend( - [ - "build", - "--build-temp", - self.build_dir, - "--build-base", - self.build_dir, - "--build-platlib", - ".", - "--disable-optimization", - ] - ) - - if self.fc_flags: - sys.argv.extend(["config_fc"] + self.fc_flags) - if self.flib_flags: - sys.argv.extend(["build_ext"] + self.flib_flags) - - setup(ext_modules=[ext]) - - if self.remove_build_dir and os.path.exists(self.build_dir): - print(f"Removing build directory {self.build_dir}") - shutil.rmtree(self.build_dir) diff --git a/numpy/f2py/_backends/_distutils.pyi b/numpy/f2py/_backends/_distutils.pyi deleted file mode 100644 index 56bbf7e5b49a..000000000000 --- a/numpy/f2py/_backends/_distutils.pyi +++ /dev/null @@ -1,13 +0,0 @@ 
-from typing_extensions import deprecated, override - -from ._backend import Backend - -class DistutilsBackend(Backend): - @deprecated( - "distutils has been deprecated since NumPy 1.26.x. Use the Meson backend instead, or generate wrappers without -c and " - "use a custom build script" - ) - # NOTE: the `sef` typo matches runtime - def __init__(sef, *args: object, **kwargs: object) -> None: ... - @override - def compile(self) -> None: ... diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index cbd9b0e32729..4c498bab2f25 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -50,6 +50,7 @@ def __init__( self.pipeline = [ self.initialize_template, self.sources_substitution, + self.objects_substitution, self.deps_substitution, self.include_substitution, self.libraries_substitution, @@ -79,6 +80,11 @@ def sources_substitution(self) -> None: [f"{self.indent}'''{source}'''," for source in self.sources] ) + def objects_substitution(self) -> None: + self.substitutions["obj_list"] = ",\n".join( + [f"{self.indent}'''{obj}'''," for obj in self.objects] + ) + def deps_substitution(self) -> None: self.substitutions["dep_list"] = f",\n{self.indent}".join( [f"{self.indent}dependency('{dep}')," for dep in self.deps] @@ -186,6 +192,7 @@ def run_meson(self, build_dir: Path): def compile(self) -> None: self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir) + _prepare_objects(self.modulename, self.extra_objects, self.build_dir) self.write_meson_build(self.build_dir) self.run_meson(self.build_dir) self._move_exec_to_root(self.build_dir) @@ -216,6 +223,12 @@ def _prepare_sources(mname, sources, bdir): ] return extended_sources +def _prepare_objects(mname, objects, bdir): + Path(bdir).mkdir(parents=True, exist_ok=True) + # Copy objects + for obj in objects: + if Path(obj).exists() and Path(obj).is_file(): + shutil.copy(obj, bdir) def _get_flags(fc_flags): flag_values = [] diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index b9f959537214..55ff9f7ae78d 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -1,9 +1,6 @@ from collections.abc import Callable from pathlib import Path -from typing import Final -from typing import Literal as L - -from typing_extensions import override +from typing import Final, Literal as L, override from ._backend import Backend @@ -42,6 +39,7 @@ class MesonTemplate: # def initialize_template(self) -> None: ... def sources_substitution(self) -> None: ... + def objects_substitution(self) -> None: ... def deps_substitution(self) -> None: ... def libraries_substitution(self) -> None: ... def include_substitution(self) -> None: ... diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index fdcc1b17ce21..58c6758cc503 100644 --- a/numpy/f2py/_backends/meson.build.template +++ b/numpy/f2py/_backends/meson.build.template @@ -43,6 +43,9 @@ ${source_list}, include_directories: [ inc_np, ${inc_list} + ], + objects: [ +${obj_list} ], dependencies : [ py_dep, diff --git a/numpy/f2py/_src_pyf.pyi b/numpy/f2py/_src_pyf.pyi index f5aecbf1decd..50ddd07bf638 100644 --- a/numpy/f2py/_src_pyf.pyi +++ b/numpy/f2py/_src_pyf.pyi @@ -1,9 +1,8 @@ import re +from _typeshed import StrOrBytesPath from collections.abc import Mapping from typing import Final -from _typeshed import StrOrBytesPath - routine_start_re: Final[re.Pattern[str]] = ... routine_end_re: Final[re.Pattern[str]] = ... 
function_start_re: Final[re.Pattern[str]] = ... diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 6e58e6352224..a5af31d976ec 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -42,7 +42,7 @@ 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', - 'process_f2cmap_dict', 'containscommon' + 'process_f2cmap_dict', 'containscommon', 'containsderivedtypes' ] @@ -569,6 +569,20 @@ def containscommon(rout): return 0 +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + def containsmodule(block): if ismodule(block): return 1 diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 2a0d4e106bcc..fbf0ad764aae 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,15 +1,14 @@ +from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show -from typing import Any, Final, Never, TypeAlias, TypeVar, overload -from typing import Literal as L - -from _typeshed import FileDescriptorOrPath +from typing import Any, Final, Literal as L, Never, overload from .cfuncs import errmess __all__ = [ "applyrules", "containscommon", + "containsderivedtypes", "debugcapi", "dictappend", "errmess", @@ -107,15 +106,12 @@ __all__ = [ ### -_VT = TypeVar("_VT") -_RT = TypeVar("_RT") - -_Var: TypeAlias = Mapping[str, list[str]] -_ROut: TypeAlias = Mapping[str, str] -_F2CMap: TypeAlias = Mapping[str, Mapping[str, str]] +type _Var = Mapping[str, list[str]] +type _ROut = Mapping[str, str] +type _F2CMap = Mapping[str, Mapping[str, str]] -_Bool: TypeAlias = bool | L[0, 1] -_Intent: TypeAlias = L[ +type _Bool = bool | L[0, 1] +type _Intent = L[ "INTENT_IN", "INTENT_OUT", "INTENT_INOUT", @@ -141,9 +137,9 @@ class throw_error: def __call__(self, /, var: _Var) -> Never: ... # raises F2PYError # -def l_and(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... -def l_or(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... -def l_not(f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_and[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_or[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_not[VT, RT](f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... # def outmess(t: str) -> None: ... @@ -200,11 +196,13 @@ def isintent_inplace(var: _Var) -> bool: ... def isintent_aux(var: _Var) -> bool: ... # +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... def containscommon(rout: _ROut) -> _Bool: ... def hasexternals(rout: _ROut) -> bool: ... def hasresultnote(rout: _ROut) -> _Bool: ... def hasbody(rout: _ROut) -> _Bool: ... def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... def hascallstatement(rout: _ROut) -> bool: ... def isroutine(rout: _ROut) -> bool: ... def ismodule(rout: _ROut) -> bool: ... 
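The new `containsderivedtypes` helper above mirrors the existing `containscommon`: it reports whether a parsed block is itself a Fortran derived-type definition (`block == 'type'`) or has one directly in its body. A standalone sketch with a hand-made block dict; real input would come from crackfortran, and `hasbody` is simplified here to a `.get()` lookup:

    # Standalone sketch of the derived-type detection added above; the
    # block dict is hand-made, not real crackfortran output.
    def hasderivedtypes(rout):
        return ('block' in rout) and rout['block'] == 'type'

    def containsderivedtypes(rout):
        if hasderivedtypes(rout):
            return 1
        for b in rout.get('body') or []:
            if hasderivedtypes(b):
                return 1
        return 0

    module_block = {
        'block': 'module',
        'body': [{'block': 'type', 'name': 'point'}],  # derived-type def
    }
    assert containsderivedtypes(module_block) == 1

Note the check is deliberately shallow, matching the diff: it looks only one level into `body`, which is all f90mod_rules needs in order to skip a used module that defines a derived type.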
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index a9a56b2455f2..b2b1cad3d867 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -598,32 +598,37 @@ def errmess(s: str) -> None:
     return ii;
 }"""
 cfuncs['forcomb'] = """
-static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
-static int initforcomb(npy_intp *dims,int nd,int tr) {
+struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; };
+static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) {
     int k;
     if (dims==NULL) return 0;
     if (nd<0) return 0;
-    forcombcache.nd = nd;
-    forcombcache.d = dims;
-    forcombcache.tr = tr;
-    if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
-    if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
+    cache->nd = nd;
+    cache->d = dims;
+    cache->tr = tr;
+
+    cache->i = (int *)malloc(sizeof(int)*nd);
+    if (cache->i==NULL) return 0;
+    cache->i_tr = (int *)malloc(sizeof(int)*nd);
+    if (cache->i_tr==NULL) {free(cache->i); return 0;};
+
     for (k=1;k<nd;k++) {
-        forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
+        cache->i[k] = cache->i_tr[nd-k-1] = 0;
     }
-    forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
+    cache->i[0] = cache->i_tr[nd-1] = -1;
     return 1;
 }
-static int *nextforcomb(void) {
+static int *nextforcomb(struct ForcombCache *cache) {
+    if (cache==NULL) return NULL;
     int j,*i,*i_tr,k;
-    int nd=forcombcache.nd;
-    if ((i=forcombcache.i) == NULL) return NULL;
-    if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
-    if (forcombcache.d == NULL) return NULL;
+    int nd=cache->nd;
+    if ((i=cache->i) == NULL) return NULL;
+    if ((i_tr=cache->i_tr) == NULL) return NULL;
+    if (cache->d == NULL) return NULL;
     i[0]++;
-    if (i[0]==forcombcache.d[0]) {
+    if (i[0]==cache->d[0]) {
         j=1;
-        while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
+        while ((j<nd) && (i[j]==cache->d[j]-1)) j++;
         if (j==nd) {
            free(i);
            free(i_tr);
@@ -634,7 +639,7 @@ def errmess(s: str) -> None:
         i_tr[nd-j-1]++;
     }
     else
         i_tr[nd-1]++;
-    if (forcombcache.tr) return i_tr;
+    if (cache->tr) return i_tr;
     return i;
 }"""
 needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string']
@@ -1047,9 +1052,12 @@ def errmess(s: str) -> None:
         PyArray_ScalarAsCtype(obj, v);
         return 1;
     }
-    else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) {
-        (*v) = *((npy_longdouble *)PyArray_DATA(obj));
-        return 1;
+    else if (PyArray_Check(obj)) {
+        PyArrayObject *arr = (PyArrayObject *)obj;
+        if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) {
+            (*v) = *((npy_longdouble *)PyArray_DATA(arr));
+            return 1;
+        }
     }
 }
 if (double_from_pyobj(&d, obj, errmess)) {
@@ -1131,10 +1139,13 @@ def errmess(s: str) -> None:
         PyArray_ScalarAsCtype(obj, v);
         return 1;
     }
-    else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) {
-        (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj))));
-        (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj))));
-        return 1;
+    else if (PyArray_Check(obj)) {
+        PyArrayObject *arr = (PyArrayObject *)obj;
+        if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) {
+            (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr))));
+            (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr))));
+            return 1;
+        }
     }
 }
 if (complex_double_from_pyobj(&cd,obj,errmess)) {
diff --git a/numpy/f2py/cfuncs.pyi b/numpy/f2py/cfuncs.pyi
index 5887177752c3..2187368797a4 100644
--- a/numpy/f2py/cfuncs.pyi
+++ b/numpy/f2py/cfuncs.pyi
@@ -1,11 +1,11 @@
-from typing import Final, TypeAlias
+from typing import Final

 from .__version__ import version

 ###

-_NeedListDict: TypeAlias = dict[str, list[str]]
-_NeedDict: TypeAlias = dict[str, str]
+type _NeedListDict = dict[str, list[str]]
+type _NeedDict = dict[str, str]

 ###

diff --git
a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index 6b08f8784f01..09213e156636 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,20 +1,16 @@ import re -from collections.abc import Callable, Iterable, Mapping -from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload -from typing import Literal as L - from _typeshed import StrOrBytesPath, StrPath +from collections.abc import Callable, Iterable, Mapping +from typing import IO, Any, Concatenate, Final, Literal as L, Never, overload from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict ### -_Tss = ParamSpec("_Tss") - -_VisitResult: TypeAlias = list[Any] | dict[str, Any] | None -_VisitItem: TypeAlias = tuple[str | None, _VisitResult] -_VisitFunc: TypeAlias = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, _Tss], _VisitItem | None] +type _VisitResult = list[Any] | dict[str, Any] | None +type _VisitItem = tuple[str | None, _VisitResult] +type _VisitFunc[**Tss] = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, Tss], _VisitItem | None] ### @@ -235,13 +231,13 @@ def crackfortran(files: StrOrBytesPath | Iterable[StrOrBytesPath]) -> list[dict[ def crack2fortran(block: Mapping[str, Any]) -> str: ... # -def traverse( +def traverse[**Tss]( obj: tuple[str | None, _VisitResult], - visit: _VisitFunc[_Tss], + visit: _VisitFunc[Tss], parents: list[tuple[str | None, _VisitResult]] = [], result: list[Any] | dict[str, Any] | None = None, - *args: _Tss.args, - **kwargs: _Tss.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> _VisitItem | _VisitResult: ... # diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 7eb1697cc787..3e2c53b0ec1d 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -23,10 +23,10 @@ def run(): try: import numpy - has_newnumpy = 1 + has_numpy = 1 except ImportError as e: - print('Failed to import new numpy:', e) - has_newnumpy = 0 + print('Failed to import numpy:', e) + has_numpy = 0 try: from numpy.f2py import f2py2e @@ -35,20 +35,9 @@ def run(): print('Failed to import f2py2e:', e) has_f2py2e = 0 - try: - import numpy.distutils - has_numpy_distutils = 2 - except ImportError: - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError as e: - print('Failed to import numpy_distutils:', e) - has_numpy_distutils = 0 - - if has_newnumpy: + if has_numpy: try: - print(f'Found new numpy version {numpy.__version__!r} in {numpy.__file__}') + print(f'Found numpy version {numpy.__version__!r} in {numpy.__file__}') except Exception as msg: print('error:', msg) print('------') @@ -61,87 +50,6 @@ def run(): print('error:', msg) print('------') - if has_numpy_distutils: - try: - if has_numpy_distutils == 2: - print('Found numpy.distutils version %r in %r' % ( - numpy.distutils.__version__, - numpy.distutils.__file__)) - else: - print('Found numpy_distutils version %r in %r' % ( - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 1: - print( - 'Importing numpy_distutils.command.build_flib ...', end=' ') - import numpy_distutils.command.build_flib as build_flib - print('ok') - print('------') - try: - print( - 'Checking availability of supported Fortran compilers:') - for compiler_class in build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print('------') - except Exception as msg: - 
print('error:', msg)
-                    print('------')
-        except Exception as msg:
-            print(
-                'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)')
-            print('------')
-        try:
-            if has_numpy_distutils == 2:
-                print('Importing numpy.distutils.fcompiler ...', end=' ')
-                import numpy.distutils.fcompiler as fcompiler
-            else:
-                print('Importing numpy_distutils.fcompiler ...', end=' ')
-                import numpy_distutils.fcompiler as fcompiler
-            print('ok')
-            print('------')
-            try:
-                print('Checking availability of supported Fortran compilers:')
-                fcompiler.show_fcompilers()
-                print('------')
-            except Exception as msg:
-                print('error:', msg)
-                print('------')
-        except Exception as msg:
-            print('error:', msg)
-            print('------')
-        try:
-            if has_numpy_distutils == 2:
-                print('Importing numpy.distutils.cpuinfo ...', end=' ')
-                from numpy.distutils.cpuinfo import cpuinfo
-                print('ok')
-                print('------')
-            else:
-                try:
-                    print(
-                        'Importing numpy_distutils.command.cpuinfo ...', end=' ')
-                    from numpy_distutils.command.cpuinfo import cpuinfo
-                    print('ok')
-                    print('------')
-                except Exception as msg:
-                    print('error:', msg, '(ignore it)')
-                    print('Importing numpy_distutils.cpuinfo ...', end=' ')
-                    from numpy_distutils.cpuinfo import cpuinfo
-                print('ok')
-                print('------')
-            cpu = cpuinfo()
-            print('CPU information:', end=' ')
-            for name in dir(cpuinfo):
-                if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])():
-                    print(name[1:], end=' ')
-            print('------')
-        except Exception as msg:
-            print('error:', msg)
-            print('------')
     os.chdir(_path)
diff --git a/numpy/f2py/diagnose.pyi b/numpy/f2py/diagnose.pyi
index 29cc2b4988b3..b88194ac6bff 100644
--- a/numpy/f2py/diagnose.pyi
+++ b/numpy/f2py/diagnose.pyi
@@ -1,4 +1 @@
-from _typeshed import StrOrBytesPath
-
-def run_command(cmd: StrOrBytesPath) -> None: ...
 def run() -> None: ...
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index 459299f8e127..eb5a39e088ff 100644
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -36,7 +36,6 @@
 # outmess=sys.stdout.write
 show = pprint.pprint
 outmess = auxfuncs.outmess
-MESON_ONLY_VER = (sys.version_info >= (3, 12))

 __usage__ =\
 f"""Usage:
@@ -117,10 +116,6 @@
   --include-paths <path1>:<path2>:... Search include files from the given
                    directories.

-  --help-link [..] List system resources found by system_info.py. See also
-                   --link-<resource> switch below. [..] is optional list
-                   of resources names. E.g. try 'f2py --help-link lapack_opt'.
-
   --f2cmap <filename> Load Fortran-to-Python KIND specification from the given
                    file. Default: .f2py_f2cmap in current directory.

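For context on the `--f2cmap` option retained in the usage text above: the named file holds a Python dict literal mapping Fortran type/kind pairs to C type names. A small illustrative example; the kind selectors shown are placeholders, so check the f2py documentation for the spellings your compiler produces:

    # Illustrative .f2py_f2cmap contents (a plain Python dict literal);
    # kind names here are examples, not an exhaustive or verified mapping.
    {'real': {'real32': 'float', 'real64': 'double'},
     'integer': {'int64': 'long_long'}}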
@@ -378,6 +373,8 @@ def callcrackfortran(files, options): mod['gil_used'] = 'Py_MOD_GIL_USED' else: mod['gil_used'] = 'Py_MOD_GIL_NOT_USED' + # gh-26718 Reset global + crackfortran.f77modulename = '' return postlist @@ -581,7 +578,7 @@ def preparse_sysargv(): sys.argv = [sys.argv[0]] + remaining_argv backend_key = args.backend - if MESON_ONLY_VER and backend_key == 'distutils': + if backend_key == 'distutils': outmess("Cannot use distutils backend with Python>=3.12," " using meson backend instead.\n") backend_key = "meson" @@ -655,35 +652,16 @@ def run_compile(): reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] - if not (MESON_ONLY_VER or backend_key == 'meson'): - fc_flags.extend(distutils_flags) sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: v = '--fcompiler=' if s[:len(v)] == v: - if MESON_ONLY_VER or backend_key == 'meson': - outmess( - "--fcompiler cannot be used with meson," - "set compiler with the FC environment variable\n" - ) - else: - from numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = list(fcompiler.fcompiler_class.keys()) - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print(f'Unknown vendor: "{s[len(v):]}"') - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv # noqa: B909 - continue + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) for s in del_list: i = flib_flags.index(s) del flib_flags[i] @@ -771,15 +749,6 @@ def validate_modulename(pyf_files, modulename='untitled'): return modulename def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - if MESON_ONLY_VER: - outmess("Use --dep for meson builds\n") - else: - from numpy.distutils.system_info import show_all - show_all() - return - if '-c' in sys.argv[1:]: run_compile() else: diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index dd1d0c39e8a5..4dd6a9f73ec3 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -2,19 +2,13 @@ import argparse import pprint from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence from types import ModuleType -from typing import Any, Final, NotRequired, TypedDict, type_check_only - -from typing_extensions import TypeVar, override +from typing import Any, Final, NotRequired, TypedDict, override, type_check_only from .__version__ import version -from .auxfuncs import _Bool -from .auxfuncs import outmess as outmess +from .auxfuncs import _Bool, outmess as outmess ### -_KT = TypeVar("_KT", bound=Hashable) -_VT = TypeVar("_VT") - @type_check_only class _F2PyDict(TypedDict): csrc: list[str] @@ -30,7 +24,6 @@ class _PreparseResult(TypedDict): ### -MESON_ONLY_VER: Final[bool] f2py_version: Final = version numpy_version: Final = version __usage__: Final[str] @@ -57,7 +50,7 @@ def main() -> None: ... def scaninputline(inputline: Iterable[str]) -> tuple[list[str], dict[str, _Bool]]: ... def callcrackfortran(files: list[str], options: dict[str, bool]) -> list[dict[str, Any]]: ... def buildmodules(lst: Iterable[Mapping[str, object]]) -> dict[str, dict[str, Any]]: ... 
-def dict_append(d_out: MutableMapping[_KT, _VT], d_in: Mapping[_KT, _VT]) -> None: ... +def dict_append[KT: Hashable, VT](d_out: MutableMapping[KT, VT], d_in: Mapping[KT, VT]) -> None: ... def filter_files( prefix: str, suffix: str, diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index 29adbe78a26f..d13a42a9d71f 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -120,6 +120,10 @@ def dadd(line, s=doc): if m['name'] in usenames and containscommon(m): outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") + continue if onlyvars: outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") chooks = [''] diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index 0a875006ed75..09b67f7c3085 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -77,7 +77,7 @@ def var2fixfortran(vars, a, fa=None, f90mode=None): def useiso_c_binding(rout): useisoc = False - for key, value in rout['vars'].items(): + for value in rout['vars'].values(): kind_value = value.get('kindselector', {}).get('kind') if kind_value in isoc_kindmap: return True diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 4122f0a49f17..68c49e60028e 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -286,7 +286,7 @@ #initcommonhooks# #interface_usercode# -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m , #gil_used#); #endif @@ -1154,7 +1154,7 @@ 'frompyobj': [ ' #setdims#;', ' capi_#varname#_intent |= #intent#;', - (' const char * capi_errmess = "#modulename#.#pyname#:' + (' const char capi_errmess[] = "#modulename#.#pyname#:' ' failed to create array from the #nth# `#varname#`";'), {isintent_hide: ' capi_#varname#_as_array = ndarray_from_pyobj(' @@ -1184,9 +1184,10 @@ """\ int *_i,capi_i=0; CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); - if (initforcomb(PyArray_DIMS(capi_#varname#_as_array), + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), PyArray_NDIM(capi_#varname#_as_array),1)) { - while ((_i = nextforcomb())) + while ((_i = nextforcomb(&cache))) #varname#[capi_i++] = #init#; /* fortran way */ } else { PyObject *exc, *val, *tb; diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index aa91e942698a..c45d42289363 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,19 +1,12 @@ from collections.abc import Callable, Iterable, Mapping -from typing import Any, Final, TypeAlias -from typing import Literal as L - -from typing_extensions import TypeVar +from typing import Any, Final, Literal as L from .__version__ import version from .auxfuncs import _Bool, _Var -### - -_VT = TypeVar("_VT", default=str) - -_Predicate: TypeAlias = Callable[[_Var], _Bool] -_RuleDict: TypeAlias = dict[str, _VT] -_DefDict: TypeAlias = dict[_Predicate, _VT] +type _Predicate = Callable[[_Var], _Bool] +type _RuleDict[VT] = dict[str, VT] +type _DefDict[VT] = dict[_Predicate, VT] ### @@ -26,9 +19,9 @@ sepdict: Final[dict[str, str]] = ... generationtime: Final[int] = ... typedef_need_dict: Final[_DefDict[str]] = ... -module_rules: Final[_RuleDict[str | list[str] | _RuleDict]] = ... -routine_rules: Final[_RuleDict[str | list[str] | _DefDict | _RuleDict]] = ... 
-defmod_rules: Final[list[_RuleDict[str | _DefDict]]] = ... +module_rules: Final[_RuleDict[str | list[str] | _RuleDict[str]]] = ... +routine_rules: Final[_RuleDict[str | list[str] | _DefDict[str] | _RuleDict[str]]] = ... +defmod_rules: Final[list[_RuleDict[str | _DefDict[str]]]] = ... rout_rules: Final[list[_RuleDict[str | Any]]] = ... aux_rules: Final[list[_RuleDict[str | Any]]] = ... arg_rules: Final[list[_RuleDict[str | Any]]] = ... @@ -36,8 +29,8 @@ check_rules: Final[list[_RuleDict[str | Any]]] = ... stnd: Final[dict[L[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], L["st", "nd", "rd", "th"]]] = ... -def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict: ... -def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict, str]: ... +def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict[str]: ... +def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict[str], str]: ... # namespace pollution k: str diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 4e2aa370b643..d6664d6bdfb7 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -47,7 +47,7 @@ F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) "failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -87,7 +87,7 @@ F2PyGetThreadLocalCallbackPtr(char *key) "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -363,7 +363,9 @@ fortran_getattr(PyFortranObject *fp, char *name) { int i, j, k, flag; if (fp->dict != NULL) { - PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); // noqa: borrowed-ref OK if (v == NULL && PyErr_Occurred()) { return NULL; } @@ -371,6 +373,17 @@ fortran_getattr(PyFortranObject *fp, char *name) Py_INCREF(v); return v; } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + } for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); i++) @@ -809,7 +822,7 @@ get_elsize(PyObject *obj) { } else if (PyUnicode_Check(obj)) { return PyUnicode_GET_LENGTH(obj); } else if (PySequence_Check(obj)) { - PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); + PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); // noqa: borrowed-ref OK if (fast != NULL) { Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast); int sz, elsize = 0; diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 11645172fe30..c768b3c470ed 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -155,14 +155,14 @@ def ewarn(message): class Expr: - """Represents a Fortran expression as a op-data pair. + """Represents a Fortran expression as an op-data pair. Expr instances are hashable and sortable. """ @staticmethod def parse(s, language=Language.C): - """Parse a Fortran expression to a Expr. + """Parse a Fortran expression to an Expr. 
""" return fromstring(s, language=language) @@ -1236,6 +1236,8 @@ def replace_parenthesis(s): i = mn_i j = s.find(right, i) + if j == -1: + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') while s.count(left, i + 1, j) != s.count(right, i + 1, j): j = s.find(right, j + 1) @@ -1478,7 +1480,7 @@ def restore(r): if isinstance(items, Expr): return items if paren in ['ROUNDDIV', 'SQUARE']: - # Expression is a array constructor + # Expression is an array constructor if isinstance(items, Expr): items = (items,) return as_array(items) diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index 74e7a48ab327..542dde12791f 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -1,30 +1,25 @@ from collections.abc import Callable, Mapping from enum import Enum -from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload -from typing import Literal as L - +from typing import Any, Generic, Literal as L, Self, overload from typing_extensions import TypeVar __all__ = ["Expr"] ### -_Tss = ParamSpec("_Tss") -_ExprT = TypeVar("_ExprT", bound=Expr) -_ExprT1 = TypeVar("_ExprT1", bound=Expr) -_ExprT2 = TypeVar("_ExprT2", bound=Expr) +# Explicit covariance is required here due to the inexpressible read-only attributes. _OpT_co = TypeVar("_OpT_co", bound=Op, default=Op, covariant=True) _LanguageT_co = TypeVar("_LanguageT_co", bound=Language, default=Language, covariant=True) _DataT_co = TypeVar("_DataT_co", default=Any, covariant=True) _LeftT_co = TypeVar("_LeftT_co", default=Any, covariant=True) _RightT_co = TypeVar("_RightT_co", default=Any, covariant=True) -_RelCOrPy: TypeAlias = L["==", "!=", "<", "<=", ">", ">="] -_RelFortran: TypeAlias = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] +type _RelCOrPy = L["==", "!=", "<", "<=", ">", ">="] +type _RelFortran = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] -_ToExpr: TypeAlias = Expr | complex | str -_ToExprN: TypeAlias = _ToExpr | tuple[_ToExprN, ...] -_NestedString: TypeAlias = str | tuple[_NestedString, ...] | list[_NestedString] +type _ToExpr = Expr | complex | str +type _ToExprN = _ToExpr | tuple[_ToExprN, ...] +type _NestedString = str | tuple[_NestedString, ...] | list[_NestedString] ### @@ -99,8 +94,8 @@ class Precedence(Enum): NONE = 100 class Expr(Generic[_OpT_co, _DataT_co]): - op: _OpT_co - data: _DataT_co + op: _OpT_co # read-only + data: _DataT_co # read-only @staticmethod def parse(s: str, language: Language = ...) -> Expr: ... @@ -151,7 +146,7 @@ class Expr(Generic[_OpT_co, _DataT_co]): # @overload - def __getitem__(self, index: _ExprT | tuple[_ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, _ExprT]]: ... + def __getitem__[ExprT: Expr](self, index: ExprT | tuple[ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, ExprT]]: ... @overload def __getitem__(self, index: _ToExpr | tuple[_ToExpr], /) -> Expr[L[Op.INDEXING], tuple[Self, Expr]]: ... @@ -160,9 +155,9 @@ class Expr(Generic[_OpT_co, _DataT_co]): # @overload - def traverse(self, /, visit: Callable[_Tss, None], *args: _Tss.args, **kwargs: _Tss.kwargs) -> Expr: ... + def traverse[**Tss](self, /, visit: Callable[Tss, None], *args: Tss.args, **kwargs: Tss.kwargs) -> Expr: ... @overload - def traverse(self, /, visit: Callable[_Tss, _ExprT], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _ExprT: ... + def traverse[**Tss, ExprT: Expr](self, /, visit: Callable[Tss, ExprT], *args: Tss.args, **kwargs: Tss.kwargs) -> ExprT: ... # def contains(self, /, other: Expr) -> bool: ... 
@@ -178,23 +173,23 @@ class Expr(Generic[_OpT_co, _DataT_co]): def tostring(self, /, parent_precedence: Precedence = ..., language: Language = ...) -> str: ... class _Pair(Generic[_LeftT_co, _RightT_co]): - left: _LeftT_co - right: _RightT_co + left: _LeftT_co # read-only + right: _RightT_co # read-only def __init__(self, /, left: _LeftT_co, right: _RightT_co) -> None: ... # @overload - def substitute(self: _Pair[_ExprT1, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... + def substitute[ExprT: Expr](self: _Pair[ExprT, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... @overload - def substitute(self: _Pair[_ExprT1, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... + def substitute[ExprT: Expr](self: _Pair[ExprT, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... @overload - def substitute(self: _Pair[object, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... + def substitute[ExprT: Expr](self: _Pair[object, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... @overload def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> _Pair: ... class _FromStringWorker(Generic[_LanguageT_co]): - language: _LanguageT_co + language: _LanguageT_co # read-only original: str | None quotes_map: dict[str, str] diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index b66672a43e21..25866f1a40ec 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -223,7 +223,7 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); #endif -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 000000000000..7692c82cf42e --- /dev/null +++ b/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index a8f952752cf4..15383e9431cc 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -147,9 +147,9 @@ def is_intent_exact(self, *names): # and several tests fail as the alignment flag can be randomly true or false # when numpy gains an aligned allocator the tests could be enabled again # -# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. +# Furthermore, on macOS ARM64 and AIX, LONGDOUBLE is an alias for DOUBLE. 
if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) - and sys.platform != "win32" + and sys.platform not in ["win32", "aix"] and (platform.system(), platform.processor()) != ("Darwin", "arm")): _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ diff --git a/numpy/f2py/tests/test_docs.py b/numpy/f2py/tests/test_docs.py index 5d9aaac9f15b..7015af3b2627 100644 --- a/numpy/f2py/tests/test_docs.py +++ b/numpy/f2py/tests/test_docs.py @@ -60,5 +60,7 @@ def test_ftype(self): ftype.data.x[1] = 45 assert_array_equal(ftype.data.x, np.array([1, 45, 3], dtype=np.float32)) + # gh-26718 Cleanup for repeated test runs + ftype.data.a = 0 # TODO: implement test methods for other example Fortran codes diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 2f91eb77c4bd..bd7064fd348a 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -232,10 +232,8 @@ def test_untitled_cli(capfd, hello_world_f90, monkeypatch): out, _ = capfd.readouterr() assert "untitledmodule.c" in out - -@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') -def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): - """Check that no distutils imports are performed on 3.12 +def test_no_distutils_backend(capfd, hello_world_f90, monkeypatch): + """Check that distutils backend and related options fail CLI :: --fcompiler --help-link --backend distutils """ MNAME = "hi" @@ -248,22 +246,23 @@ def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): compiler_check_f2pycli() out, _ = capfd.readouterr() assert "--fcompiler cannot be used with meson" in out + monkeypatch.setattr( sys, "argv", ["f2py", "--help-link"] ) - with util.switchdir(ipath.parent): + with pytest.raises(SystemExit): f2pycli() out, _ = capfd.readouterr() - assert "Use --dep for meson builds" in out - MNAME = "hi2" # Needs to be different for a new -c + assert "Unknown option --help-link" in out + monkeypatch.setattr( - sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() + sys, "argv", ["f2py", "--backend", "distutils"] ) - with util.switchdir(ipath.parent): + with pytest.raises(SystemExit): + compiler_check_f2pycli() f2pycli() out, _ = capfd.readouterr() - assert "Cannot use distutils backend with Python>=3.12" in out - + assert "'distutils' backend was removed" in out @pytest.mark.xfail def test_f2py_skip(capfd, retreal_f77, monkeypatch): @@ -673,6 +672,25 @@ def test_inclheader(capfd, hello_world_f90, monkeypatch): assert "#include " in ocmr assert "#include " in ocmr +@pytest.mark.skipif((platform.system() != 'Linux'), reason='Compiler required') +def test_cli_obj(capfd, hello_world_f90, monkeypatch): + """Ensures that the extra object can be specified when using meson backend + """ + ipath = Path(hello_world_f90) + mname = "blah" + odir = "tttmp" + obj = "extra.o" + monkeypatch.setattr(sys, "argv", + f'f2py --backend meson --build-dir {odir} -m {mname} -c {obj} {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path(obj).touch() + compiler_check_f2pycli() + with Path(f"{odir}/meson.build").open() as mesonbuild: + mbld = mesonbuild.read() + assert "objects:" in mbld + assert f"'''{obj}'''" in mbld + def test_inclpath(): """Add to the include directories diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index ce223a555456..c219cc8bfd09 100644 --- a/numpy/f2py/tests/test_kind.py 
+++ b/numpy/f2py/tests/test_kind.py @@ -5,13 +5,12 @@ from numpy.f2py.crackfortran import ( _selected_int_kind_func as selected_int_kind, -) -from numpy.f2py.crackfortran import ( _selected_real_kind_func as selected_real_kind, ) from . import util +IS_PPC_OR_AIX = platform.machine().lower().startswith("ppc") or platform.system() == 'AIX' class TestKind(util.F2PyTest): sources = [util.getpath("tests", "src", "kind", "foo.f90")] @@ -39,7 +38,7 @@ def test_real(self): i ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" - @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + @pytest.mark.xfail(IS_PPC_OR_AIX, reason="Some PowerPC may not support full IEEE 754 precision") def test_quad_precision(self): """ diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 1931ad21a48b..c4636a764914 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -37,6 +37,16 @@ def test_mdat(self): assert self.module.simple_subroutine(5) == 1014 +class TestModuleWithDerivedType(util.F2PyTest): + # Check that modules with derived types work + sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")] + + @pytest.mark.slow + def test_mtypes(self): + assert self.module.no_type_subroutine(10) == 110 + assert self.module.type_subroutine(10) == 210 + + class TestNegativeBounds(util.F2PyTest): # Check that negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] @@ -148,7 +158,7 @@ def test_gh26623(): @pytest.mark.slow -@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now') +@pytest.mark.skipif(platform.system() == "Windows", reason='Unsupported on this platform for now') def test_gh25784(): # Compile dubious file using passed flags try: @@ -157,7 +167,7 @@ def test_gh25784(): options=[ # Meson will collect and dedup these to pass to fortran_args: "--f77flags='-ffixed-form -O2'", - "--f90flags=\"-ffixed-form -Og\"", + "--f90flags=\"-ffixed-form -g\"", ], module_name="Blah", ) diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py index 13a9f862f311..50309d5dadaf 100644 --- a/numpy/f2py/tests/test_return_integer.py +++ b/numpy/f2py/tests/test_return_integer.py @@ -29,8 +29,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) - pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) if tname in ["t8", "s8"]: pytest.raises(OverflowError, t, 100000000000000000000000) diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index c871ed3d4fc2..4339657aa013 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -39,8 +39,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) - pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) try: r = t(10**400) diff --git a/numpy/f2py/tests/test_symbolic.py b/numpy/f2py/tests/test_symbolic.py index ec23f522128b..fbf5abd9aa18 100644 --- a/numpy/f2py/tests/test_symbolic.py +++ b/numpy/f2py/tests/test_symbolic.py @@ -493,3 +493,8 @@ def test_polynomial_atoms(self): assert (y(x) + x).polynomial_atoms() == {y(x), x} assert (y(x) * x[y]).polynomial_atoms() == {y(x), 
x[y]} assert (y(x)**x).polynomial_atoms() == {y(x)} + + def test_unmatched_parenthesis_gh30268(self): + # gh-30268 + with pytest.raises(ValueError, match=r"Mismatch of \(\) parenthesis"): + Expr.parse("DATA (A, I=1, N", language=Language.Fortran) diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index 55f7320f653f..2de162c5ec71 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -200,9 +200,7 @@ """ -# TODO: `numpy.fft.helper`` was deprecated in NumPy 2.0. It should -# be deleted once downstream libraries move to `numpy.fft`. -from . import _helper, _pocketfft, helper +from . import _helper, _pocketfft from ._helper import * from ._pocketfft import * diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 54d0ea8c79b6..893a697f1398 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,9 +1,4 @@ -from ._helper import ( - fftfreq, - fftshift, - ifftshift, - rfftfreq, -) +from ._helper import fftfreq, fftshift, ifftshift, rfftfreq from ._pocketfft import ( fft, fft2, diff --git a/numpy/fft/_helper.py b/numpy/fft/_helper.py index 77adeac9207f..b3598534bcdf 100644 --- a/numpy/fft/_helper.py +++ b/numpy/fft/_helper.py @@ -156,7 +156,7 @@ def fftfreq(n, d=1.0, device=None): Examples -------- >>> import numpy as np - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=np.float64) >>> fourier = np.fft.fft(signal) >>> n = signal.size >>> timestep = 0.1 @@ -215,7 +215,7 @@ def rfftfreq(n, d=1.0, device=None): Examples -------- >>> import numpy as np - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=np.float64) >>> fourier = np.fft.rfft(signal) >>> n = signal.size >>> sample_rate = 100 diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index c7f2f6a8bc3a..93f96c9a10b6 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -124,7 +124,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): This function computes the one-dimensional *n*-point discrete Fourier Transform (DFT) with the efficient Fast Fourier Transform (FFT) - algorithm [CT]. + algorithm [CT]_.
Parameters ---------- @@ -302,7 +302,7 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): >>> import matplotlib.pyplot as plt >>> t = np.arange(400) - >>> n = np.zeros((400,), dtype=complex) + >>> n = np.zeros((400,), dtype=np.complex128) >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) >>> plt.plot(t, s.real, label='real') @@ -1005,7 +1005,7 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt - >>> n = np.zeros((200,200), dtype=complex) + >>> n = np.zeros((200,200), dtype=np.complex128) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = np.fft.ifftn(n).real >>> plt.imshow(im) diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 215cf14d1395..d34404edb149 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,6 +1,5 @@ from collections.abc import Sequence from typing import Literal as L -from typing import TypeAlias from numpy import complex128, float64 from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co @@ -22,117 +21,117 @@ __all__ = [ "ifftn", ] -_NormKind: TypeAlias = L["backward", "ortho", "forward"] | None +type _NormKind = L["backward", "ortho", "forward"] | None def fft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... # Input array must be compatible with `np.conjugate` def hfft( a: _ArrayLikeNumber_co, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... def ihfft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def fftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... 
def ifftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... def fft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... 
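The `_pocketfft.pyi` rewrite above replaces the `...` placeholders with the real runtime defaults: `n=None`, `axis=-1`, `norm=None`, and `axes=(-2, -1)` for the 2-D variants. A quick illustrative check, not part of the patch, that the spelled-out defaults agree with what the runtime does:

```python
import numpy as np

x = np.exp(2j * np.pi * np.arange(8) / 8)

# norm=None means "backward": unscaled forward, 1/n inverse -> exact round trip.
assert np.allclose(np.fft.ifft(np.fft.fft(x)), x)

# norm="ortho" scales the forward transform by 1/sqrt(n).
assert np.allclose(np.fft.fft(x, norm="ortho"), np.fft.fft(x) / np.sqrt(8))

# fft2 defaults to axes=(-2, -1): the DC term of an all-ones 4x4 input is 16.
assert np.isclose(np.fft.fft2(np.ones((4, 4)))[0, 0], 16.0)
```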
diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 525b5e5a23da..f616fe9b0bdc 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -32,7 +32,7 @@ template static void wrap_legacy_cpp_ufunc(char **args, npy_intp const *dimensions, - ptrdiff_t const *steps, void *func) + npy_intp const *steps, void *func) { NPY_ALLOW_C_API_DEF try { @@ -86,14 +86,14 @@ copy_output(T buff[], char *out, npy_intp step_out, size_t n) */ template static void -fft_loop(char **args, npy_intp const *dimensions, ptrdiff_t const *steps, +fft_loop(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func) { char *ip = args[0], *fp = args[1], *op = args[2]; size_t n_outer = (size_t)dimensions[0]; - ptrdiff_t si = steps[0], sf = steps[1], so = steps[2]; + npy_intp si = steps[0], sf = steps[1], so = steps[2]; size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2]; - ptrdiff_t step_in = steps[3], step_out = steps[4]; + npy_intp step_in = steps[3], step_out = steps[4]; bool direction = *((bool *)func); /* pocketfft::FORWARD or BACKWARD */ assert (nout > 0); @@ -144,9 +144,9 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, { char *ip = args[0], *fp = args[1], *op = args[2]; size_t n_outer = (size_t)dimensions[0]; - ptrdiff_t si = steps[0], sf = steps[1], so = steps[2]; + npy_intp si = steps[0], sf = steps[1], so = steps[2]; size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2]; - ptrdiff_t step_in = steps[3], step_out = steps[4]; + npy_intp step_in = steps[3], step_out = steps[4]; assert (nout > 0 && nout == npts / 2 + 1); @@ -233,14 +233,13 @@ irfft_loop(char **args, npy_intp const *dimensions, npy_intp const *steps, void size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2]; ptrdiff_t step_in = steps[3], step_out = steps[4]; - size_t npts_in = nout / 2 + 1; - assert(nout > 0); #ifndef POCKETFFT_NO_VECTORS /* * Call pocketfft directly if vectorization is possible. 
*/ + size_t npts_in = nout / 2 + 1; constexpr auto vlen = pocketfft::detail::VLEN::val; if (vlen > 1 && n_outer >= vlen && nin >= npts_in && sf == 0) { std::vector axes = { 1 }; @@ -388,41 +387,57 @@ add_gufuncs(PyObject *dictionary) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - NULL, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__pocketfft_umath(void) +static int +_pocketfft_umath_exec(PyObject *m) { - PyObject *m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Import the array and ufunc objects */ - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } PyObject *d = PyModule_GetDict(m); if (add_gufuncs(d) < 0) { Py_DECREF(d); - Py_DECREF(m); - return NULL; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _pocketfft_umath_slots[] = { + {Py_mod_exec, (void*)_pocketfft_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_pocketfft_umath", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + NULL, /* m_methods */ + _pocketfft_umath_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__pocketfft_umath(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py deleted file mode 100644 index 08d5662c6d17..000000000000 --- a/numpy/fft/helper.py +++ /dev/null @@ -1,17 +0,0 @@ -def __getattr__(attr_name): - import warnings - - from numpy.fft import _helper - ret = getattr(_helper, attr_name, None) - if ret is None: - raise AttributeError( - f"module 'numpy.fft.helper' has no attribute {attr_name}") - warnings.warn( - "The numpy.fft.helper has been made private and renamed to " - "numpy.fft._helper. All four functions exported by it (i.e. fftshift, " - "ifftshift, fftfreq, rfftfreq) are available from numpy.fft. " - f"Please use numpy.fft.{attr_name} instead.", - DeprecationWarning, - stacklevel=3 - ) - return ret diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi deleted file mode 100644 index 887cbe7e27c9..000000000000 --- a/numpy/fft/helper.pyi +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Any -from typing import Literal as L - -from typing_extensions import deprecated - -import numpy as np -from numpy._typing import ArrayLike, NDArray, _ShapeLike - -from ._helper import integer_types as integer_types - -__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] - -### - -@deprecated("Please use `numpy.fft.fftshift` instead.") -def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... 
-@deprecated("Please use `numpy.fft.ifftshift` instead.") -def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... -@deprecated("Please use `numpy.fft.fftfreq` instead.") -def fftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... -@deprecated("Please use `numpy.fft.rfftfreq` instead.") -def rfftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... diff --git a/numpy/fft/meson.build b/numpy/fft/meson.build index e18949af5e31..a5b2413ebb90 100644 --- a/numpy/fft/meson.build +++ b/numpy/fft/meson.build @@ -23,8 +23,6 @@ py.install_sources( '_pocketfft.pyi', '_helper.py', '_helper.pyi', - 'helper.py', - 'helper.pyi', ], subdir: 'numpy/fft' ) diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 021181845b3b..6f26ab6c6d65 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -55,7 +55,7 @@ def test_identity_long_short(self, dtype): def test_identity_long_short_reversed(self, dtype): # Also test explicitly given number of points in reversed order. maxlen = 16 - atol = 5 * np.spacing(np.array(1., dtype=dtype)) + atol = 6 * np.spacing(np.array(1., dtype=dtype)) x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) for i in range(1, maxlen * 2): diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 8532ef8d9fb9..5a85743e4d0f 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,7 +1,30 @@ from numpy._core.function_base import add_newdoc from numpy._core.multiarray import add_docstring, tracemalloc_domain -from . import ( # noqa: F401 +# all submodules of `lib` are accessible at runtime through `__getattr__`, +# so we implicitly re-export them here +from . 
import ( + _array_utils_impl as _array_utils_impl, + _arraypad_impl as _arraypad_impl, + _arraysetops_impl as _arraysetops_impl, + _arrayterator_impl as _arrayterator_impl, + _datasource as _datasource, + _format_impl as _format_impl, + _function_base_impl as _function_base_impl, + _histograms_impl as _histograms_impl, + _index_tricks_impl as _index_tricks_impl, + _iotools as _iotools, + _nanfunctions_impl as _nanfunctions_impl, + _npyio_impl as _npyio_impl, + _polynomial_impl as _polynomial_impl, + _scimath_impl as _scimath_impl, + _shape_base_impl as _shape_base_impl, + _stride_tricks_impl as _stride_tricks_impl, + _twodim_base_impl as _twodim_base_impl, + _type_check_impl as _type_check_impl, + _ufunclike_impl as _ufunclike_impl, + _utils_impl as _utils_impl, + _version as _version, array_utils, format, introspect, @@ -18,6 +41,7 @@ __all__ = [ "add_docstring", "add_newdoc", "array_utils", + "format", "introspect", "mixins", "NumpyVersion", diff --git a/numpy/lib/_array_utils_impl.py b/numpy/lib/_array_utils_impl.py index c3996e1f2b92..25d78c1eb6a6 100644 --- a/numpy/lib/_array_utils_impl.py +++ b/numpy/lib/_array_utils_impl.py @@ -30,7 +30,7 @@ def byte_bounds(a): Examples -------- >>> import numpy as np - >>> I = np.eye(2, dtype='f'); I.dtype + >>> I = np.eye(2, dtype=np.float32); I.dtype dtype('float32') >>> low, high = np.lib.array_utils.byte_bounds(I) >>> high - low == I.size*I.itemsize diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index d3e0714773f2..e33507a127c9 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -1,8 +1,5 @@ -from collections.abc import Iterable -from typing import Any - -from numpy import generic -from numpy.typing import NDArray +import numpy as np +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple __all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] @@ -10,17 +7,4 @@ __all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] # implementing the `__array_interface__` protocol. The caveat is # that certain keys, marked as optional in the spec, must be present for # `byte_bounds`. This concerns `"strides"` and `"data"`. -def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... - -def normalize_axis_tuple( - axis: int | Iterable[int], - ndim: int = ..., - argname: str | None = ..., - allow_duplicate: bool | None = ..., -) -> tuple[int, int]: ... - -def normalize_axis_index( - axis: int = ..., - ndim: int = ..., - msg_prefix: str | None = ..., -) -> int: ... +def byte_bounds(a: np.generic | np.ndarray) -> tuple[int, int]: ... diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index 507a0ab51b52..681b92fc8a72 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -3,6 +3,8 @@ of an n-dimensional array. """ +import typing + import numpy as np from numpy._core.overrides import array_function_dispatch from numpy.lib._index_tricks_impl import ndindex @@ -550,7 +552,7 @@ def pad(array, pad_width, mode='constant', **kwargs): ---------- array : array_like of rank N The array to pad. - pad_width : {sequence, array_like, int} + pad_width : {sequence, array_like, int, dict} Number of values padded to the edges of each axis. ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths for each axis. @@ -558,6 +560,9 @@ def pad(array, pad_width, mode='constant', **kwargs): and after pad for each axis. ``(pad,)`` or ``int`` is a shortcut for before = after = pad width for all axes. 
+ If a ``dict``, each key is an axis and its value is either a single ``int`` + (used as the pad width both before and after that axis) or an ``int`` pair + giving the ``(before, after)`` pad widths for that axis. mode : str or function, optional One of the following string values or a user supplied function. @@ -745,8 +750,39 @@ def pad(array, pad_width, mode='constant', **kwargs): [100, 100, 3, 4, 5, 100, 100], [100, 100, 100, 100, 100, 100, 100], [100, 100, 100, 100, 100, 100, 100]]) + + >>> a = np.arange(1, 7).reshape(2, 3) + >>> np.pad(a, {1: (1, 2)}) + array([[0, 1, 2, 3, 0, 0], + [0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {-1: 2}) + array([[0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {0: (3, 0)}) + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [1, 2, 3], + [4, 5, 6]]) + >>> np.pad(a, {0: (3, 0), 1: 2}) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) """ array = np.asarray(array) + if isinstance(pad_width, dict): + seq = [(0, 0)] * array.ndim + for axis, width in pad_width.items(): + match width: + case int(both): + seq[axis] = both, both + case tuple((int(before), int(after))): + seq[axis] = before, after + case _ as invalid: + typing.assert_never(invalid) + pad_width = seq pad_width = np.asarray(pad_width) if not pad_width.dtype.kind == 'i': diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index 46b43762b87f..da7c89859d86 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,27 +1,10 @@ -from typing import ( - Any, - Protocol, - TypeAlias, - TypeVar, - overload, - type_check_only, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, Protocol, overload, type_check_only -from numpy import generic -from numpy._typing import ( - ArrayLike, - NDArray, - _ArrayLike, - _ArrayLikeInt, -) +import numpy as np +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt __all__ = ["pad"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - @type_check_only class _ModeFunc(Protocol): def __call__( @@ -33,7 +16,7 @@ class _ModeFunc(Protocol): /, ) -> None: ... -_ModeKind: TypeAlias = L[ +type _ModeKind = L[ "constant", "edge", "linear_ramp", @@ -47,43 +30,52 @@ _ModeKind: TypeAlias = L[ "empty", ] +type _PadWidth = ( + _ArrayLikeInt + | dict[int, int] + | dict[int, tuple[int, int]] + | dict[int, int | tuple[int, int]] +) + +### + # TODO: In practice each keyword argument is exclusive to one or more # specific modes. Consider adding more overloads to express this in the future. # Expand `**kwargs` into explicit keyword-only arguments @overload -def pad( - array: _ArrayLike[_ScalarT], - pad_width: _ArrayLikeInt, - mode: _ModeKind = ..., +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], + pad_width: _PadWidth, + mode: _ModeKind = "constant", *, - stat_length: _ArrayLikeInt | None = ..., - constant_values: ArrayLike = ..., - end_values: ArrayLike = ..., - reflect_type: L["odd", "even"] = ..., -) -> NDArray[_ScalarT]: ... + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", +) -> NDArray[ScalarT]: ...
@overload def pad( array: ArrayLike, - pad_width: _ArrayLikeInt, - mode: _ModeKind = ..., + pad_width: _PadWidth, + mode: _ModeKind = "constant", *, - stat_length: _ArrayLikeInt | None = ..., - constant_values: ArrayLike = ..., - end_values: ArrayLike = ..., - reflect_type: L["odd", "even"] = ..., + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", ) -> NDArray[Any]: ... @overload -def pad( - array: _ArrayLike[_ScalarT], - pad_width: _ArrayLikeInt, +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], + pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def pad( array: ArrayLike, - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, ) -> NDArray[Any]: ... diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index ef0739ba486f..5d521b1fba60 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -15,19 +15,19 @@ """ import functools -import warnings from typing import NamedTuple import numpy as np from numpy._core import overrides from numpy._core._multiarray_umath import _array_converter, _unique_hash +from numpy.lib.array_utils import normalize_axis_index array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') __all__ = [ - "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values" ] @@ -290,7 +290,9 @@ def unique(ar, return_index=False, return_inverse=False, """ ar = np.asanyarray(ar) - if axis is None: + if axis is None or ar.ndim == 1: + if axis is not None: + normalize_axis_index(axis, ar.ndim) ret = _unique1d(ar, return_index, return_inverse, return_counts, equal_nan=equal_nan, inverse_shape=ar.shape, axis=None, sorted=sorted) @@ -368,7 +370,8 @@ def _unique1d(ar, return_index=False, return_inverse=False, conv = _array_converter(ar) ar_, = conv - if (hash_unique := _unique_hash(ar_)) is not NotImplemented: + if (hash_unique := _unique_hash(ar_, equal_nan=equal_nan)) \ + is not NotImplemented: if sorted: hash_unique.sort() # We wrap the result back in case it was a subclass of numpy.ndarray. @@ -800,112 +803,7 @@ def setxor1d(ar1, ar2, assume_unique=False): return aux[flag[1:] & flag[:-1]] -def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *, - kind=None): - return (ar1, ar2) - - -@array_function_dispatch(_in1d_dispatcher) -def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): - """ - Test whether each element of a 1-D array is also present in a second array. - - .. deprecated:: 2.0 - Use :func:`isin` instead of `in1d` for new code. - - Returns a boolean array the same length as `ar1` that is True - where an element of `ar1` is in `ar2` and False otherwise. - - Parameters - ---------- - ar1 : (M,) array_like - Input array. - ar2 : array_like - The values against which to test each value of `ar1`. - assume_unique : bool, optional - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - invert : bool, optional - If True, the values in the returned array are inverted (that is, - False where an element of `ar1` is in `ar2` and True otherwise). - Default is False. 
``np.in1d(a, b, invert=True)`` is equivalent - to (but is faster than) ``np.invert(in1d(a, b))``. - kind : {None, 'sort', 'table'}, optional - The algorithm to use. This will not affect the final result, - but will affect the speed and memory use. The default, None, - will select automatically based on memory considerations. - - * If 'sort', will use a mergesort-based approach. This will have - a memory usage of roughly 6 times the sum of the sizes of - `ar1` and `ar2`, not accounting for size of dtypes. - * If 'table', will use a lookup table approach similar - to a counting sort. This is only available for boolean and - integer arrays. This will have a memory usage of the - size of `ar1` plus the max-min value of `ar2`. `assume_unique` - has no effect when the 'table' option is used. - * If None, will automatically choose 'table' if - the required memory allocation is less than or equal to - 6 times the sum of the sizes of `ar1` and `ar2`, - otherwise will use 'sort'. This is done to not use - a large amount of memory by default, even though - 'table' may be faster in most cases. If 'table' is chosen, - `assume_unique` will have no effect. - - Returns - ------- - in1d : (M,) ndarray, bool - The values `ar1[in1d]` are in `ar2`. - - See Also - -------- - isin : Version of this function that preserves the - shape of ar1. - - Notes - ----- - `in1d` can be considered as an element-wise function version of the - python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly - equivalent to ``np.array([item in b for item in a])``. - However, this idea fails if `ar2` is a set, or similar (non-sequence) - container: As ``ar2`` is converted to an array, in those cases - ``asarray(ar2)`` is an object array rather than the expected array of - contained values. - - Using ``kind='table'`` tends to be faster than `kind='sort'` if the - following relationship is true: - ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, - but may use greater memory. The default value for `kind` will - be automatically selected based only on memory usage, so one may - manually set ``kind='table'`` if memory constraints can be relaxed. - - Examples - -------- - >>> import numpy as np - >>> test = np.array([0, 1, 2, 5, 0]) - >>> states = [0, 2] - >>> mask = np.in1d(test, states) - >>> mask - array([ True, False, True, False, True]) - >>> test[mask] - array([0, 2, 0]) - >>> mask = np.in1d(test, states, invert=True) - >>> mask - array([False, True, False, True, False]) - >>> test[mask] - array([1, 5]) - """ - - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`in1d` is deprecated. 
Use `np.isin` instead.", - DeprecationWarning, - stacklevel=2 - ) - - return _in1d(ar1, ar2, assume_unique, invert, kind=kind) - - -def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): +def _isin(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() @@ -1174,7 +1072,7 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, [ True, False]]) """ element = np.asarray(element) - return _in1d(element, test_elements, assume_unique=assume_unique, + return _isin(element, test_elements, assume_unique=assume_unique, invert=invert, kind=kind).reshape(element.shape) @@ -1257,4 +1155,4 @@ def setdiff1d(ar1, ar2, assume_unique=False): else: ar1 = unique(ar1) ar2 = unique(ar2) - return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)] + return ar1[_isin(ar1, ar2, assume_unique=True, invert=True)] diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index a7ad5b9d91e7..b0b918bd3e23 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,7 +1,4 @@ -from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload -from typing import Literal as L - -from typing_extensions import TypeVar, deprecated +from typing import Any, Literal as L, NamedTuple, SupportsIndex, TypeVar, overload import numpy as np from numpy._typing import ( @@ -14,7 +11,6 @@ from numpy._typing import ( __all__ = [ "ediff1d", - "in1d", "intersect1d", "isin", "setdiff1d", @@ -27,16 +23,13 @@ __all__ = [ "unique_values", ] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_) - # Explicitly set all allowed values to prevent accidental castings to # abstract dtypes (their common super-type). # Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) # which could result in, for example, `int64` and `float64`producing a # `number[_64Bit]` array -_EitherSCT = TypeVar( - "_EitherSCT", +_AnyScalarT = TypeVar( + "_AnyScalarT", np.bool, np.int8, np.int16, np.int32, np.int64, np.intp, np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, @@ -47,23 +40,23 @@ _EitherSCT = TypeVar( np.integer, np.floating, np.complexfloating, np.character, ) # fmt: skip -_AnyArray: TypeAlias = NDArray[Any] -_IntArray: TypeAlias = NDArray[np.intp] +type _NumericScalar = np.number | np.timedelta64 | np.object_ +type _IntArray = NDArray[np.intp] ### -class UniqueAllResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] +class UniqueAllResult[ScalarT: np.generic](NamedTuple): + values: NDArray[ScalarT] indices: _IntArray inverse_indices: _IntArray counts: _IntArray -class UniqueCountsResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] +class UniqueCountsResult[ScalarT: np.generic](NamedTuple): + values: NDArray[ScalarT] counts: _IntArray -class UniqueInverseResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] +class UniqueInverseResult[ScalarT: np.generic](NamedTuple): + values: NDArray[ScalarT] inverse_indices: _IntArray # @@ -74,11 +67,11 @@ def ediff1d( to_begin: ArrayLike | None = None, ) -> NDArray[np.int8]: ... @overload -def ediff1d( - ary: _ArrayLike[_NumericT], +def ediff1d[NumericT: _NumericScalar]( + ary: _ArrayLike[NumericT], to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> NDArray[_NumericT]: ... +) -> NDArray[NumericT]: ... 
@overload def ediff1d( ary: _ArrayLike[np.datetime64[Any]], @@ -90,19 +83,20 @@ def ediff1d( ary: _ArrayLikeNumber_co, to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> _AnyArray: ... +) -> np.ndarray: ... # @overload # known scalar-type, FFF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, return_inverse: L[False] = False, return_counts: L[False] = False, axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> NDArray[_ScalarT]: ... + sorted: bool = True, +) -> NDArray[ScalarT]: ... @overload # unknown scalar-type, FFF def unique( ar: ArrayLike, @@ -112,17 +106,19 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> _AnyArray: ... + sorted: bool = True, +) -> np.ndarray: ... @overload # known scalar-type, TFF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False] = False, return_counts: L[False] = False, axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, TFF def unique( ar: ArrayLike, @@ -132,27 +128,30 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... @overload # known scalar-type, FTF (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[True], return_counts: L[False] = False, axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # known scalar-type, FTF (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, *, return_inverse: L[True], return_counts: L[False] = False, axis: SupportsIndex | None = None, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, FTF (positional) def unique( ar: ArrayLike, @@ -162,7 +161,8 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... @overload # unknown scalar-type, FTF (keyword) def unique( ar: ArrayLike, @@ -172,27 +172,30 @@ def unique( return_counts: L[False] = False, axis: SupportsIndex | None = None, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... @overload # known scalar-type, FFT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[False], return_counts: L[True], axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... 
@overload # known scalar-type, FFT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, return_inverse: L[False] = False, *, return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, FFT (positional) def unique( ar: ArrayLike, @@ -202,7 +205,8 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... @overload # unknown scalar-type, FFT (keyword) def unique( ar: ArrayLike, @@ -212,17 +216,19 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... @overload # known scalar-type, TTF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[True], return_counts: L[False] = False, axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTF def unique( ar: ArrayLike, @@ -232,27 +238,30 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False], return_counts: L[True], axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False] = False, *, return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (positional) def unique( ar: ArrayLike, @@ -262,7 +271,8 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (keyword) def unique( ar: ArrayLike, @@ -272,27 +282,30 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[True], return_counts: L[True], axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... 
+ sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, *, return_inverse: L[True], return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (positional) def unique( ar: ArrayLike, @@ -302,7 +315,8 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (keyword) def unique( ar: ArrayLike, @@ -312,17 +326,19 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # known scalar-type, TTT -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[True], return_counts: L[True], axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTT def unique( ar: ArrayLike, @@ -332,69 +348,70 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ... + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray, _IntArray]: ... # @overload -def unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ... +def unique_all[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueAllResult[ScalarT]: ... @overload def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ... # @overload -def unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ... +def unique_counts[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueCountsResult[ScalarT]: ... @overload def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ... # @overload -def unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ... +def unique_inverse[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueInverseResult[ScalarT]: ... @overload def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ... # @overload -def unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def unique_values[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload -def unique_values(x: ArrayLike) -> _AnyArray: ... +def unique_values(x: ArrayLike) -> np.ndarray: ... # @overload # known scalar-type, return_indices=False (default) def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, return_indices: L[False] = False, -) -> NDArray[_EitherSCT]: ... +) -> NDArray[_AnyScalarT]: ... 
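# Runtime behaviour behind the `return_indices` overloads below (standard
# `intersect1d` semantics; the indices point at the first occurrences):
#
#     >>> common, ia, ib = np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1], return_indices=True)
#     >>> common, ia, ib
#     (array([1, 3]), array([0, 1]), array([1, 0]))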
@overload # known scalar-type, return_indices=True (positional) def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool, return_indices: L[True], -) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[_AnyScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, return_indices=True (keyword) def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[_AnyScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, return_indices=False (default) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False, return_indices: L[False] = False, -) -> _AnyArray: ... +) -> np.ndarray: ... @overload # unknown scalar-type, return_indices=True (positional) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool, return_indices: L[True], -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, return_indices=True (keyword) def intersect1d( ar1: ArrayLike, @@ -402,25 +419,25 @@ def intersect1d( assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... # @overload -def setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +def setxor1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> NDArray[_AnyScalarT]: ... @overload -def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> np.ndarray: ... # @overload -def union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ... +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> NDArray[_AnyScalarT]: ... @overload -def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ... +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> np.ndarray: ... # @overload -def setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +def setdiff1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> NDArray[_AnyScalarT]: ... @overload -def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> np.ndarray: ... # def isin( @@ -431,14 +448,3 @@ def isin( *, kind: L["sort", "table"] | None = None, ) -> NDArray[np.bool]: ... - -# -@deprecated("Use 'isin' instead") -def in1d( - element: ArrayLike, - test_elements: ArrayLike, - assume_unique: bool = False, - invert: bool = False, - *, - kind: L["sort", "table"] | None = None, -) -> NDArray[np.bool]: ... 
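With the `in1d` shim and its stub gone, `isin` is the sole public entry point for element-wise membership. A minimal migration sketch (the `kind="table"` fast path is optional and only pays off for small integer ranges):

import numpy as np

element = np.array([[0, 2], [4, 6]])
test = [1, 2, 4, 8]
mask = np.isin(element, test, kind="table")  # was: np.in1d(...).reshape(element.shape)
assert mask.tolist() == [[False, True], [True, False]]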
diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi
index e1a9e056a6e1..a1a4428885fd 100644
--- a/numpy/lib/_arrayterator_impl.pyi
+++ b/numpy/lib/_arrayterator_impl.pyi
@@ -2,8 +2,7 @@
 from collections.abc import Generator
 from types import EllipsisType
-from typing import Any, Final, TypeAlias, overload
-
+from typing import Any, Final, overload
 from typing_extensions import TypeVar
 import numpy as np
@@ -11,12 +10,11 @@ from numpy._typing import _AnyShape, _Shape
 __all__ = ["Arrayterator"]
+# Type parameter default syntax (PEP 696) requires Python 3.13+
 _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)
-_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
 _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)
-_ScalarT = TypeVar("_ScalarT", bound=np.generic)
-_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...]
+type _AnyIndex = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...]
 # NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
 # but its `__getattr__` method does wrap around the former and thus has
@@ -32,7 +30,7 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]):
     @property # type: ignore[misc]
     def shape(self) -> _ShapeT_co: ...
     @property
-    def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override]
+    def flat[ScalarT: np.generic](self: Arrayterator[Any, np.dtype[ScalarT]]) -> Generator[ScalarT]: ... # type: ignore[override]
     #
     def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ...
@@ -40,7 +38,7 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]):
     def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ...
     #
-    @overload # type: ignore[override]
+    @overload
     def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ...
     @overload
-    def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ...
+    def __array__[DTypeT: np.dtype](self, /, dtype: DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, DTypeT]: ...
diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi
index 9f91fdf893a0..33af9cf1b197 100644
--- a/numpy/lib/_datasource.pyi
+++ b/numpy/lib/_datasource.pyi
@@ -1,15 +1,14 @@
-from pathlib import Path
-from typing import IO, Any, TypeAlias
-
 from _typeshed import OpenBinaryMode, OpenTextMode
+from pathlib import Path
+from typing import IO, Any
-_Mode: TypeAlias = OpenBinaryMode | OpenTextMode
+type _Mode = OpenBinaryMode | OpenTextMode
###
# exported in numpy.lib.npyio
class DataSource:
-    def __init__(self, /, destpath: Path | str | None = ...) -> None: ...
+    def __init__(self, /, destpath: Path | str | None = ".") -> None: ...
     def __del__(self, /) -> None: ...
     def abspath(self, /, path: str) -> str: ...
     def exists(self, /, path: str) -> bool: ...
@@ -19,13 +18,13 @@ class DataSource:
     def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ...
 class Repository(DataSource):
-    def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ...
+    def __init__(self, /, baseurl: str, destpath: str | None = ".") -> None: ...
     def listdir(self, /) -> list[str]: ...
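# Usage sketch for the `destpath` defaults spelled out above (illustrative;
# `DataSource(None)` uses a temporary directory instead of the `"."` default):
#
#     >>> import numpy as np
#     >>> ds = np.lib.npyio.DataSource(None)
#     >>> with open("example.txt", "w") as f:
#     ...     _ = f.write("42\n")
#     >>> ds.exists("example.txt")
#     True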
def open( path: str, mode: _Mode = "r", - destpath: str | None = ..., + destpath: str | None = ".", encoding: str | None = None, newline: str | None = None, ) -> IO[Any]: ... diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py index 7378ba554810..2bb557709c8b 100644 --- a/numpy/lib/_format_impl.py +++ b/numpy/lib/_format_impl.py @@ -645,7 +645,7 @@ def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE): "may be necessary.") # The header is a pretty-printed string representation of a literal - # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte + # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte # boundary. The keys are strings. # "shape" : tuple of int # "fortran_order" : bool @@ -879,10 +879,10 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, ) if fortran_order: - array.shape = shape[::-1] + array = array.reshape(shape[::-1]) array = array.transpose() else: - array.shape = shape + array = array.reshape(shape) return array diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index f4898d9aefa4..f8b9a7ab88a9 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,26 +1,56 @@ -from typing import Final, Literal +import os +from _typeshed import SupportsRead, SupportsWrite +from typing import Any, BinaryIO, Final, TypeGuard -from numpy.lib._utils_impl import drop_metadata # noqa: F401 +import numpy as np +import numpy.typing as npt +from numpy.lib._utils_impl import drop_metadata as drop_metadata __all__: list[str] = [] -EXPECTED_KEYS: Final[set[str]] -MAGIC_PREFIX: Final[bytes] -MAGIC_LEN: Literal[8] -ARRAY_ALIGN: Literal[64] -BUFFER_SIZE: Literal[262144] # 2**18 -GROWTH_AXIS_MAX_DIGITS: Literal[21] +type _DTypeDescr = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] -def magic(major, minor): ... -def read_magic(fp): ... -def dtype_to_descr(dtype): ... -def descr_to_dtype(descr): ... -def header_data_from_array_1_0(array): ... -def write_array_header_1_0(fp, d): ... -def write_array_header_2_0(fp, d): ... -def read_array_header_1_0(fp): ... -def read_array_header_2_0(fp): ... -def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... -def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... -def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... -def isfileobj(f): ... +### + +EXPECTED_KEYS: Final[set[str]] = ... +MAGIC_PREFIX: Final = b"\x93NUMPY" +MAGIC_LEN: Final = 8 +ARRAY_ALIGN: Final = 64 +BUFFER_SIZE: Final = 262_144 # 1 << 18 +GROWTH_AXIS_MAX_DIGITS: Final = 21 +_MAX_HEADER_SIZE: Final = 10_000 + +def magic(major: int, minor: int) -> bytes: ... +def read_magic(fp: SupportsRead[bytes]) -> tuple[int, int]: ... +def dtype_to_descr(dtype: np.dtype) -> _DTypeDescr: ... +def descr_to_dtype(descr: _DTypeDescr) -> np.dtype: ... +def header_data_from_array_1_0(array: np.ndarray) -> dict[str, Any]: ... +def write_array_header_1_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def write_array_header_2_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def read_array_header_1_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def read_array_header_2_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... 
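# The header helpers above can be exercised through their public
# `numpy.lib.format` re-exports (illustrative round trip):
#
#     >>> import io
#     >>> buf = io.BytesIO()
#     >>> np.save(buf, np.zeros((2, 3), dtype=np.float64))
#     >>> _ = buf.seek(0)
#     >>> np.lib.format.read_magic(buf)
#     (1, 0)
#     >>> np.lib.format.read_array_header_1_0(buf)
#     ((2, 3), False, dtype('float64'))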
+def write_array( + fp: SupportsWrite[bytes], + array: np.ndarray, + version: tuple[int, int] | None = None, + allow_pickle: bool = True, + pickle_kwargs: dict[str, Any] | None = None, +) -> None: ... +def read_array( + fp: SupportsRead[bytes], + allow_pickle: bool = False, + pickle_kwargs: dict[str, Any] | None = None, + *, + max_header_size: int = 10_000, +) -> np.ndarray: ... +def open_memmap( + filename: str | os.PathLike[Any], + mode: str = "r+", + dtype: npt.DTypeLike | None = None, + shape: tuple[int, ...] | None = None, + fortran_order: bool = False, + version: tuple[int, int] | None = None, + *, + max_header_size: int = 10_000, +) -> np.memmap: ... +def isfileobj(f: object) -> TypeGuard[BinaryIO]: ... # don't use `typing.TypeIs` diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 63346088b6e2..3e0005079104 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2,7 +2,6 @@ import collections.abc import functools import re -import sys import warnings import numpy as np @@ -10,9 +9,14 @@ from numpy._core import overrides, transpose from numpy._core._multiarray_umath import _array_converter from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum -from numpy._core.multiarray import _monotonicity, _place, bincount, normalize_axis_index -from numpy._core.multiarray import interp as compiled_interp -from numpy._core.multiarray import interp_complex as compiled_interp_complex +from numpy._core.multiarray import ( + _monotonicity, + _place, + bincount, + interp as compiled_interp, + interp_complex as compiled_interp_complex, + normalize_axis_index, +) from numpy._core.numeric import ( absolute, arange, @@ -37,6 +41,7 @@ arctan2, cos, exp, + floor, frompyfunc, less_equal, minimum, @@ -63,7 +68,7 @@ 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'bincount', 'digitize', 'cov', 'corrcoef', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', - 'blackman', 'kaiser', 'trapezoid', 'trapz', 'i0', + 'blackman', 'kaiser', 'trapezoid', 'i0', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'quantile' ] @@ -608,12 +613,15 @@ def asarray_chkfinite(a, dtype=None, order=None): dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or + Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. Returns ------- @@ -646,7 +654,7 @@ class ndarray is returned. ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] - >>> np.asarray_chkfinite(a, dtype=float) + >>> np.asarray_chkfinite(a, dtype=np.float64) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. @@ -699,7 +707,7 @@ def piecewise(x, condlist, funclist, *args, **kw): is the default value, used wherever all conditions are false. 
funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding - condition is True. It should take a 1d array as input and give an 1d + condition is True. It should take a 1d array as input and give a 1d array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. @@ -1312,7 +1320,10 @@ def gradient(f, *varargs, axis=None, edge_order=1): # fix the shape for broadcasting shape = np.ones(N, dtype=int) shape[axis] = -1 - a.shape = b.shape = c.shape = shape + + a = a.reshape(shape) + b = b.reshape(shape) + c = c.reshape(shape) # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + c * f[tuple(slice4)] @@ -1775,6 +1786,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2 * pi): Examples -------- >>> import numpy as np + >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase @@ -1792,6 +1804,23 @@ def unwrap(p, discont=None, axis=-1, *, period=2 * pi): array([-180., -140., -100., -60., -20., 20., 60., 100., 140., 180., 220., 260., 300., 340., 380., 420., 460., 500., 540.]) + + This example plots the unwrapping of the wrapped input signal `w`. + First generate `w`, then apply `unwrap` to get `u`. + + >>> t = np.linspace(0, 25, 801) + >>> w = np.mod(1.5 * np.sin(1.1 * t + 0.26) * (1 - t / 6 + (t / 23) ** 3), 2.0) - 1 + >>> u = np.unwrap(w, period=2.0) + + Plot `w` and `u`. + + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, w, label='w (a signal wrapped to [-1, 1])') + >>> plt.plot(t, u, linewidth=2.5, alpha=0.5, label='unwrap(w, period=2)') + >>> plt.xlabel('t') + >>> plt.grid(alpha=0.6) + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.show() """ p = asarray(p) nd = p.ndim @@ -1976,6 +2005,14 @@ def trim_zeros(filt, trim='fb', axis=None): trim = trim.lower() if trim not in {"fb", "bf", "f", "b"}: raise ValueError(f"unexpected character(s) in `trim`: {trim!r}") + if axis is None: + axis_tuple = tuple(range(filt_.ndim)) + else: + axis_tuple = _nx.normalize_axis_tuple(axis, filt_.ndim, argname="axis") + + if not axis_tuple: + # No trimming requested -> return input unmodified. + return filt start, stop = _arg_trim_zeros(filt_) stop += 1 # Adjust for slicing @@ -1990,20 +2027,13 @@ def trim_zeros(filt, trim='fb', axis=None): if 'b' not in trim: stop = (None,) * filt_.ndim - if len(start) == 1: - # filt is 1D -> don't use multi-dimensional slicing to preserve + sl = tuple(slice(start[ax], stop[ax]) if ax in axis_tuple else slice(None) + for ax in range(filt_.ndim)) + if len(sl) == 1: + # filt is 1D -> avoid multi-dimensional slicing to preserve # non-array input types - sl = slice(start[0], stop[0]) - elif axis is None: - # trim all axes - sl = tuple(slice(*x) for x in zip(start, stop)) - else: - # only trim single axis - axis = normalize_axis_index(axis, filt_.ndim) - sl = (slice(None),) * axis + (slice(start[axis], stop[axis]),) + (...,) - - trimmed = filt[sl] - return trimmed + return filt[sl[0]] + return filt[sl] def _extract_dispatcher(condition, arr): @@ -2108,62 +2138,6 @@ def place(arr, mask, vals): return _place(arr, mask, vals) -def disp(mesg, device=None, linefeed=True): - """ - Display a message on a device. - - .. deprecated:: 2.0 - Use your own printing function instead. - - Parameters - ---------- - mesg : str - Message to display. - device : object - Device to write message. 
If None, defaults to ``sys.stdout`` which is - very similar to ``print``. `device` needs to have ``write()`` and - ``flush()`` methods. - linefeed : bool, optional - Option whether to print a line feed or not. Defaults to True. - - Raises - ------ - AttributeError - If `device` does not have a ``write()`` or ``flush()`` method. - - Examples - -------- - >>> import numpy as np - - Besides ``sys.stdout``, a file-like object can also be used as it has - both required methods: - - >>> from io import StringIO - >>> buf = StringIO() - >>> np.disp('"Display" in a file', device=buf) - >>> buf.getvalue() - '"Display" in a file\\n' - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`disp` is deprecated, " - "use your own printing function instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if device is None: - device = sys.stdout - if linefeed: - device.write(f'{mesg}\n') - else: - device.write(f'{mesg}') - device.flush() - - # See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html _DIMENSION_NAME = r'\w+' _CORE_DIMENSION_LIST = f'(?:{_DIMENSION_NAME}(?:,{_DIMENSION_NAME})*)?' @@ -2326,8 +2300,8 @@ class vectorize: passed directly to `pyfunc` unmodified. cache : bool, optional - If `True`, then cache the first function call that determines the number - of outputs if `otypes` is not provided. + If neither `otypes` nor `signature` are provided, and `cache` is ``True``, then + cache the number of outputs. signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for @@ -2351,12 +2325,12 @@ class vectorize: The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. - If `otypes` is not specified, then a call to the function with the - first argument will be used to determine the number of outputs. The - results of this call will be cached if `cache` is `True` to prevent - calling the function twice. However, to implement the cache, the - original function must be wrapped which will slow down subsequent - calls, so only do this if your function is expensive. + If neither `otypes` nor `signature` are specified, then a call to the function with + the first argument will be used to determine the number of outputs. The results of + this call will be cached if `cache` is `True` to prevent calling the function + twice. However, to implement the cache, the original function must be wrapped + which will slow down subsequent calls, so only do this if your function is + expensive. The new keyword argument interface and `excluded` argument support further degrades performance. @@ -2573,6 +2547,7 @@ def _get_ufunc_and_otypes(self, func, args): # the subsequent call when the ufunc is evaluated. # Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) + args = [asarray(a) for a in args] if builtins.any(arg.size == 0 for arg in args): raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') @@ -2618,8 +2593,9 @@ def _vectorize_call(self, func, args): elif not args: res = func() else: - args = [asanyarray(a, dtype=object) for a in args] ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + # gh-29196: `dtype=object` should eventually be removed + args = [asanyarray(a, dtype=object) for a in args] outputs = ufunc(*args, out=...) 
if ufunc.nout == 1: @@ -2702,7 +2678,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, Estimate a covariance matrix, given data and weights. Covariance indicates the level to which two variables vary together. - If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ..., x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i`. @@ -2913,13 +2889,13 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, return c.squeeze() -def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, +def _corrcoef_dispatcher(x, y=None, rowvar=None, *, dtype=None): return (x, y) @array_function_dispatch(_corrcoef_dispatcher) -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, +def corrcoef(x, y=None, rowvar=True, *, dtype=None): """ Return Pearson product-moment correlation coefficients. @@ -2946,14 +2922,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - ddof : _NoValue, optional - Has no effect, do not use. - .. deprecated:: 1.10.0 dtype : data-type, optional Data-type of the result. By default, the return data-type will have at least `numpy.float64` precision. @@ -2977,11 +2946,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, interval [-1, 1] in an attempt to improve on that situation but is not much help in the complex case. - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. - Examples -------- >>> import numpy as np @@ -3048,10 +3012,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, 1. 
]]) """ - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn('bias and ddof have no effect and are deprecated', - DeprecationWarning, stacklevel=2) c = cov(x, y, rowvar, dtype=dtype) try: d = diag(c) @@ -3897,13 +3857,21 @@ def _ureduce(a, func, keepdims=False, **kwargs): if len(axis) == 1: kwargs['axis'] = axis[0] else: - keep = set(range(nd)) - set(axis) + keep = sorted(set(range(nd)) - set(axis)) nkeep = len(keep) - # swap axis that should not be reduced to front - for i, s in enumerate(sorted(keep)): - a = a.swapaxes(i, s) - # merge reduced axis - a = a.reshape(a.shape[:nkeep] + (-1,)) + + def reshape_arr(a): + # move axis that should not be reduced to front + a = np.moveaxis(a, keep, range(nkeep)) + # merge reduced axis + return a.reshape(a.shape[:nkeep] + (-1,)) + + a = reshape_arr(a) + + weights = kwargs.get("weights") + if weights is not None: + kwargs["weights"] = reshape_arr(weights) + kwargs['axis'] = -1 elif keepdims and out is not None: index_out = (0, ) * nd @@ -4077,8 +4045,7 @@ def _median(a, axis=None, out=None, overwrite_input=False): def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, - interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -4091,8 +4058,7 @@ def percentile(a, method="linear", keepdims=False, *, - weights=None, - interpolation=None): + weights=None): """ Compute the q-th percentile of the data along the specified axis. @@ -4150,7 +4116,7 @@ def percentile(a, the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. - weights : array_like, optional + weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the percentile according to its associated weight. The weights array can either be 1-D (in which case its length must be @@ -4162,11 +4128,6 @@ def percentile(a, .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- percentile : scalar or ndarray @@ -4262,17 +4223,11 @@ def percentile(a, The American Statistician, 50(4), pp. 361-365, 1996 """ - if interpolation is not None: - method = _check_interpolation_as_method( - method, interpolation, "percentile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float) - # by making the divisor have the dtype of the data array. - q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + q = np.true_divide(q, 100, out=...) if not _quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -4292,8 +4247,7 @@ def percentile(a, def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, - interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -4306,8 +4260,7 @@ def quantile(a, method="linear", keepdims=False, *, - weights=None, - interpolation=None): + weights=None): """ Compute the q-th quantile of the data along the specified axis. @@ -4377,11 +4330,6 @@ def quantile(a, .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. 
deprecated:: 1.22.0 - Returns ------- quantile : scalar or ndarray @@ -4523,19 +4471,11 @@ def quantile(a, The American Statistician, 50(4), pp. 361-365, 1996 """ - if interpolation is not None: - method = _check_interpolation_as_method( - method, interpolation, "quantile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float). - if isinstance(q, (int, float)) and a.dtype.kind == "f": - q = np.asanyarray(q, dtype=a.dtype) - else: - q = np.asanyarray(q) + q = np.asanyarray(q) if not _quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") @@ -4586,23 +4526,6 @@ def _quantile_is_valid(q): return True -def _check_interpolation_as_method(method, interpolation, fname): - # Deprecated NumPy 1.22, 2021-11-08 - warnings.warn( - f"the `interpolation=` argument to {fname} was renamed to " - "`method=`, which has additional options.\n" - "Users of the modes 'nearest', 'lower', 'higher', or " - "'midpoint' are encouraged to review the method they used. " - "(Deprecated NumPy 1.22)", - DeprecationWarning, stacklevel=4) - if method != "linear": - # sanity check, we assume this basically never happens - raise TypeError( - "You shall not pass both `method` and `interpolation`!\n" - "(`interpolation` is Deprecated in favor of `method`)") - return interpolation - - def _compute_virtual_index(n, quantiles, alpha: float, beta: float): """ Compute the floating point indexes of an array for the linear @@ -4628,7 +4551,7 @@ def _compute_virtual_index(n, quantiles, alpha: float, beta: float): ) - 1 -def _get_gamma(virtual_indexes, previous_indexes, method): +def _get_gamma(virtual_indexes, previous_indexes, method, dtype): """ Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation of quantiles. @@ -4638,7 +4561,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method): sample. previous_indexes : array_like The floor values of virtual_indexes. - interpolation : dict + method : dict The interpolation method chosen, which may have a specific rule modifying gamma. @@ -4649,7 +4572,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method): gamma = method["fix_gamma"](gamma, virtual_indexes) # Ensure both that we have an array, and that we keep the dtype # (which may have been matched to the input array). - return np.asanyarray(gamma, dtype=virtual_indexes.dtype) + return np.asanyarray(gamma, dtype=dtype) def _lerp(a, b, t, out=None): @@ -4666,9 +4589,8 @@ def _lerp(a, b, t, out=None): out : array_like Output array. """ - diff_b_a = subtract(b, a) - # asanyarray is a stop-gap until gh-13105 - lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out)) + diff_b_a = b - a + lerp_interpolation = add(a, diff_b_a * t, out=... if out is None else out) subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5, casting='unsafe', dtype=type(lerp_interpolation.dtype)) if lerp_interpolation.ndim == 0 and out is None: @@ -4711,14 +4633,14 @@ def _inverted_cdf(n, quantiles): def _quantile_ureduce_func( - a: np.array, - q: np.array, - weights: np.array, - axis: int | None = None, - out=None, - overwrite_input: bool = False, - method="linear", -) -> np.array: + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray | None, + axis: int | None = None, + out: np.ndarray | None = None, + overwrite_input: bool = False, + method: str = "linear", +) -> np.ndarray: if q.ndim > 2: # The code below works fine for nd, but it might not have useful # semantics. 
For now, keep the supported dimensions the same as it was @@ -4760,8 +4682,8 @@ def _get_indexes(arr, virtual_indexes, valid_values_count): (previous_indexes, next_indexes): Tuple A Tuple of virtual_indexes neighbouring indexes """ - previous_indexes = np.asanyarray(np.floor(virtual_indexes)) - next_indexes = np.asanyarray(previous_indexes + 1) + previous_indexes = floor(virtual_indexes, out=...) + next_indexes = add(previous_indexes, 1, out=...) indexes_above_bounds = virtual_indexes >= valid_values_count - 1 # When indexes is above max index, take the max value of the array if indexes_above_bounds.any(): @@ -4784,19 +4706,19 @@ def _get_indexes(arr, virtual_indexes, valid_values_count): def _quantile( - arr: np.array, - quantiles: np.array, - axis: int = -1, - method="linear", - out=None, - weights=None, -): + arr: "np.typing.ArrayLike", + quantiles: np.ndarray, + axis: int = -1, + method: str = "linear", + out: np.ndarray | None = None, + weights: "np.typing.ArrayLike | None" = None, +) -> np.ndarray: """ Private function that doesn't support extended axis or keepdims. These methods are extended to this function using _ureduce See nanpercentile for parameter usage It computes the quantiles of the array for the given axis. - A linear interpolation is performed based on the `interpolation`. + A linear interpolation is performed based on the `method`. By default, the method is "linear" where alpha == beta == 1 which performs the 7th method of Hyndman&Fan. @@ -4817,7 +4739,7 @@ def _quantile( if weights is None: # --- Computation of indexes # Index where to find the value in the sorted array. - # Virtual because it is a floating point value, not an valid index. + # Virtual because it is a floating point value, not a valid index. # The nearest neighbours are used for interpolation try: method_props = _QuantileMethods[method] @@ -4868,7 +4790,16 @@ def _quantile( previous = arr[previous_indexes] next = arr[next_indexes] # --- Linear interpolation - gamma = _get_gamma(virtual_indexes, previous_indexes, method_props) + if arr.dtype.kind in "iu": + gtype = None + elif arr.dtype.kind == "f": + # make sure the return value matches the input array type + gtype = arr.dtype + else: + gtype = virtual_indexes.dtype + + gamma = _get_gamma(virtual_indexes, previous_indexes, + method_props, gtype) result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) gamma = gamma.reshape(result_shape) result = _lerp(previous, @@ -4882,7 +4813,7 @@ def _quantile( weights = np.asanyarray(weights) if axis != 0: weights = np.moveaxis(weights, axis, destination=0) - index_array = np.argsort(arr, axis=0, kind="stable") + index_array = np.argsort(arr, axis=0) # arr = arr[index_array, ...] # but this adds trailing dimensions of # 1. @@ -4904,6 +4835,9 @@ def _quantile( # distribution function cdf cdf = weights.cumsum(axis=0, dtype=np.float64) cdf /= cdf[-1, ...] # normalization to 1 + if np.isnan(cdf[-1]).any(): + # Above calculations should normally warn for the zero/inf case. + raise ValueError("Weights included NaN, inf or were all zero.") # Search index i such that # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i) # is then equivalent to @@ -5106,24 +5040,6 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): return ret -@set_module('numpy') -def trapz(y, x=None, dx=1.0, axis=-1): - """ - `trapz` is deprecated in NumPy 2.0. - - Please use `trapezoid` instead, or one of the numerical integration - functions in `scipy.integrate`. 
- """ - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`trapz` is deprecated. Use `trapezoid` instead, or one of the " - "numerical integration functions in `scipy.integrate`.", - DeprecationWarning, - stacklevel=2 - ) - return trapezoid(y, x=x, dx=dx, axis=axis) - - def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): return xi @@ -5323,7 +5239,7 @@ def delete(arr, obj, axis=None): Often it is preferable to use a boolean mask. For example: >>> arr = np.arange(12) + 1 - >>> mask = np.ones(len(arr), dtype=bool) + >>> mask = np.ones(len(arr), dtype=np.bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] @@ -5706,7 +5622,7 @@ def append(arr, values, axis=None): the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s) - >>> a = np.array([1, 2], dtype=int) + >>> a = np.array([1, 2], dtype=np.int_) >>> c = np.append(a, []) >>> c array([1., 2.]) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 090fb233dde1..769c321b9988 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,38 +1,20 @@ -# ruff: noqa: ANN401 +from _typeshed import ConvertibleToInt, Incomplete from collections.abc import Callable, Iterable, Sequence from typing import ( Any, Concatenate, - ParamSpec, + Literal as L, + Never, Protocol, SupportsIndex, SupportsInt, - TypeAlias, - TypeVar, overload, type_check_only, ) -from typing import Literal as L - -from _typeshed import Incomplete -from typing_extensions import TypeIs, deprecated +from typing_extensions import TypeIs import numpy as np -from numpy import ( - _OrderKACF, - bool_, - complex128, - complexfloating, - datetime64, - float64, - floating, - generic, - integer, - intp, - object_, - timedelta64, - vectorize, -) +from numpy import _OrderKACF from numpy._core.multiarray import bincount from numpy._globals import _NoValueType from numpy._typing import ( @@ -42,19 +24,19 @@ from numpy._typing import ( _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, - _ArrayLikeDT64_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeNumber_co, _ArrayLikeObject_co, - _ArrayLikeTD64_co, _ComplexLike_co, _DTypeLike, _FloatLike_co, - _NestedSequence, + _NestedSequence as _SeqND, _NumberLike_co, _ScalarLike_co, + _Shape, _ShapeLike, + _SupportsArray, ) __all__ = [ @@ -88,7 +70,6 @@ __all__ = [ "blackman", "kaiser", "trapezoid", - "trapz", "i0", "meshgrid", "delete", @@ -98,213 +79,678 @@ __all__ = [ "quantile", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -# The `{}ss` suffix refers to the Python 3.12 syntax: `**P` -_Pss = ParamSpec("_Pss") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=generic) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] + +type _integer_co = np.integer | np.bool +type _float64_co = np.float64 | _integer_co +type _floating_co = np.floating | _integer_co + +# non-trivial scalar-types that will become `complex128` in `sort_complex()`, +# i.e. 
all numeric scalar types except for `[u]int{8,16} | longdouble`
+type _SortsToComplex128 = (
+    np.bool
+    | np.int32
+    | np.uint32
+    | np.int64
+    | np.uint64
+    | np.float16
+    | np.float32
+    | np.float64
+    | np.timedelta64
+    | np.object_
+)
+type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_
+type _InexactDouble = np.float64 | np.longdouble | np.complex128 | np.clongdouble
+
+type _Array[ShapeT: _Shape, ScalarT: np.generic] = np.ndarray[ShapeT, np.dtype[ScalarT]]
+type _Array0D[ScalarT: np.generic] = np.ndarray[tuple[()], np.dtype[ScalarT]]
+type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]]
+type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]]
+type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]]
+type _ArrayMax2D[ScalarT: np.generic] = np.ndarray[tuple[int] | tuple[int, int], np.dtype[ScalarT]]
+# workaround for mypy and pyright not following the typing spec for overloads
+type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[ScalarT]]
+
+type _Seq1D[T] = Sequence[T]
+type _Seq2D[T] = Sequence[Sequence[T]]
+type _Seq3D[T] = Sequence[Sequence[Sequence[T]]]
+type _ListSeqND[T] = list[T] | _SeqND[list[T]]
+
+type _Tuple2[T] = tuple[T, T]
+type _Tuple3[T] = tuple[T, T, T]
+type _Tuple4[T] = tuple[T, T, T, T]
+
+type _Mesh1[ScalarT: np.generic] = tuple[_Array1D[ScalarT]]
+type _Mesh2[ScalarT: np.generic, ScalarT1: np.generic] = tuple[_Array2D[ScalarT], _Array2D[ScalarT1]]
+type _Mesh3[ScalarT: np.generic, ScalarT1: np.generic, ScalarT2: np.generic] = tuple[
+    _Array3D[ScalarT], _Array3D[ScalarT1], _Array3D[ScalarT2]
+]
+
+type _IndexLike = slice | _ArrayLikeInt_co
+
+type _Indexing = L["ij", "xy"]
+type _InterpolationMethod = L[
+    "inverted_cdf",
+    "averaged_inverted_cdf",
+    "closest_observation",
+    "interpolated_inverted_cdf",
+    "hazen",
+    "weibull",
+    "linear",
+    "median_unbiased",
+    "normal_unbiased",
+    "lower",
+    "higher",
+    "midpoint",
+    "nearest",
+]
-_2Tuple: TypeAlias = tuple[_T, _T]
-_MeshgridIdx: TypeAlias = L['ij', 'xy']
+# The resulting value will be used as `y[cond] = func(vals, *args, **kw)`, so it can
+# return any (usually 1d) array-like or scalar-like compatible with the input.
+type _PiecewiseFunction[ScalarT: np.generic, **Tss] = Callable[Concatenate[NDArray[ScalarT], Tss], ArrayLike]
+type _PiecewiseFunctions[ScalarT: np.generic, **Tss] = _SizedIterable[_PiecewiseFunction[ScalarT, Tss] | _ScalarLike_co]
 @type_check_only
-class _TrimZerosSequence(Protocol[_T_co]):
+class _TrimZerosSequence[T](Protocol):
     def __len__(self, /) -> int: ...
     @overload
     def __getitem__(self, key: int, /) -> object: ...
     @overload
-    def __getitem__(self, key: slice, /) -> _T_co: ...
+    def __getitem__(self, key: slice, /) -> T: ...
+
+@type_check_only
+class _SupportsRMulFloat[T](Protocol):
+    def __rmul__(self, other: float, /) -> T: ...
+
+@type_check_only
+class _SizedIterable[T](Protocol):
+    def __iter__(self) -> Iterable[T]: ...
+    def __len__(self) -> int: ...
###
+class vectorize:
+    __doc__: str | None
+    __module__: L["numpy"] = "numpy"
+    pyfunc: Callable[..., Incomplete]
+    cache: bool
+    signature: str | None
+    otypes: str | None
+    excluded: set[int | str]
+
+    def __init__(
+        self,
+        /,
+        pyfunc: Callable[..., Incomplete] | _NoValueType = ..., # = _NoValue
+        otypes: str | Iterable[DTypeLike] | None = None,
+        doc: str | None = None,
+        excluded: Iterable[int | str] | None = None,
+        cache: bool = False,
+        signature: str | None = None,
+    ) -> None: ...
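    # Illustrative behaviour of `otypes`/`cache` (see the docstring in
    # _function_base_impl.py): with `otypes` set, no trial call is needed to
    # discover the output dtype; with `cache=True` and no `otypes`, the result
    # of the first call is cached so `pyfunc` is not evaluated twice for it:
    #
    #     >>> vec = np.vectorize(lambda x: x + 0.5, otypes=[np.float64])
    #     >>> vec([1, 2, 3])
    #     array([1.5, 2.5, 3.5])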
+    def __call__(self, /, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ...
+
 @overload
-def rot90(
-    m: _ArrayLike[_ScalarT],
-    k: int = ...,
-    axes: tuple[int, int] = ...,
-) -> NDArray[_ScalarT]: ...
+def rot90[ArrayT: np.ndarray](m: ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> ArrayT: ...
 @overload
-def rot90(
-    m: ArrayLike,
-    k: int = ...,
-    axes: tuple[int, int] = ...,
-) -> NDArray[Any]: ...
-
+def rot90[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[ScalarT]: ...
 @overload
-def flip(m: _ScalarT, axis: None = ...) -> _ScalarT: ...
+def rot90(m: ArrayLike, k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[Incomplete]: ...
+
+# NOTE: Technically `flip` also accepts scalars, but that has no effect and complicates
+# the overloads significantly, so we ignore that case here.
 @overload
-def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
+def flip[ArrayT: np.ndarray](m: ArrayT, axis: int | tuple[int, ...] | None = None) -> ArrayT: ...
 @overload
-def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ...
+def flip[ScalarT: np.generic](m: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[ScalarT]: ...
 @overload
-def flip(m: ArrayLike, axis: _ShapeLike | None = ...) -> NDArray[Any]: ...
+def flip(m: ArrayLike, axis: int | tuple[int, ...] | None = None) -> NDArray[Incomplete]: ...
+#
 def iterable(y: object) -> TypeIs[Iterable[Any]]: ...
-@overload
+# NOTE: This assumes that if `axis` is given the input is at least 2d, and will
+# therefore always return an array.
+# NOTE: This assumes that if `keepdims=True` the input is at least 1d, and will
+# therefore always return an array.
+@overload # inexact array, keepdims=True
+def average[ArrayT: NDArray[np.inexact]](
+    a: ArrayT,
+    axis: int | tuple[int, ...] | None = None,
+    weights: _ArrayLikeNumber_co | None = None,
+    returned: L[False] = False,
+    *,
+    keepdims: L[True],
+) -> ArrayT: ...
+@overload # inexact array, returned=True keepdims=True
+def average[ArrayT: NDArray[np.inexact]](
+    a: ArrayT,
+    axis: int | tuple[int, ...] | None = None,
+    weights: _ArrayLikeNumber_co | None = None,
+    *,
+    returned: L[True],
+    keepdims: L[True],
+) -> _Tuple2[ArrayT]: ...
+@overload # inexact array-like, axis=None
+def average[ScalarT: np.inexact](
+    a: _ArrayLike[ScalarT],
+    axis: None = None,
+    weights: _ArrayLikeNumber_co | None = None,
+    returned: L[False] = False,
+    *,
+    keepdims: L[False] | _NoValueType = ...,
+) -> ScalarT: ...
+@overload # inexact array-like, axis=
+def average[ScalarT: np.inexact](
+    a: _ArrayLike[ScalarT],
+    axis: int | tuple[int, ...],
+    weights: _ArrayLikeNumber_co | None = None,
+    returned: L[False] = False,
+    *,
+    keepdims: L[False] | _NoValueType = ...,
+) -> NDArray[ScalarT]: ...
+@overload # inexact array-like, keepdims=True
+def average[ScalarT: np.inexact](
+    a: _ArrayLike[ScalarT],
+    axis: int | tuple[int, ...] | None = None,
+    weights: _ArrayLikeNumber_co | None = None,
+    returned: L[False] = False,
+    *,
+    keepdims: L[True],
+) -> NDArray[ScalarT]: ...
+@overload # inexact array-like, axis=None, returned=True
+def average[ScalarT: np.inexact](
+    a: _ArrayLike[ScalarT],
+    axis: None = None,
+    weights: _ArrayLikeNumber_co | None = None,
+    *,
+    returned: L[True],
+    keepdims: L[False] | _NoValueType = ...,
+) -> _Tuple2[ScalarT]: ...
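# What the `returned=True` overloads encode at runtime (standard `average`
# semantics: the second element is the sum of the weights, broadcast to the
# shape of the average):
#
#     >>> data = np.arange(6.0).reshape(3, 2)
#     >>> avg, wsum = np.average(data, axis=0, weights=[1.0, 2.0, 3.0], returned=True)
#     >>> avg
#     array([2.66666667, 3.66666667])
#     >>> wsum
#     array([6., 6.])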
+@overload # inexact array-like, axis=, returned=True +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[ScalarT]]: ... +@overload # inexact array-like, returned=True, keepdims=True +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[ScalarT]]: ... +@overload # bool or integer array-like, axis=None def average( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, axis: None = None, weights: _ArrayLikeFloat_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> floating: ... -@overload +) -> np.float64: ... +@overload # bool or integer array-like, axis= +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, keepdims=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, axis=None, returned=True def average( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, axis: None = None, weights: _ArrayLikeFloat_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _2Tuple[floating]: ... -@overload +) -> _Tuple2[np.float64]: ... +@overload # bool or integer array-like, axis=, returned=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # bool or integer array-like, returned=True, keepdims=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # complex array-like, axis=None def average( - a: _ArrayLikeComplex_co, + a: _ListSeqND[complex], axis: None = None, weights: _ArrayLikeComplex_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> complexfloating: ... -@overload +) -> np.complex128: ... +@overload # complex array-like, axis= +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # complex array-like, keepdims=True +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... +@overload # complex array-like, axis=None, returned=True def average( - a: _ArrayLikeComplex_co, + a: _ListSeqND[complex], axis: None = None, weights: _ArrayLikeComplex_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _2Tuple[complexfloating]: ... 
-@overload +) -> _Tuple2[np.complex128]: ... +@overload # complex array-like, axis=, returned=True def average( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - weights: object | None = None, + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, *, returned: L[True], - keepdims: bool | bool_ | _NoValueType = ..., -) -> _2Tuple[Incomplete]: ... -@overload + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # complex array-like, keepdims=True, returned=True def average( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - weights: object | None = None, - returned: bool | bool_ = False, + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, *, - keepdims: bool | bool_ | _NoValueType = ..., -) -> Incomplete: ... + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # unknown, axis=None +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> Any: ... +@overload # unknown, axis= +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> np.ndarray: ... +@overload # unknown, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> np.ndarray: ... +@overload # unknown, axis=None, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[Any]: ... +@overload # unknown, axis=, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[np.ndarray]: ... +@overload # unknown, returned=True, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[np.ndarray]: ... +# @overload -def asarray_chkfinite( - a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., -) -> NDArray[_ScalarT]: ... -@overload -def asarray_chkfinite( - a: object, - dtype: None = ..., - order: _OrderKACF = ..., -) -> NDArray[Any]: ... -@overload -def asarray_chkfinite( - a: Any, - dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., -) -> NDArray[_ScalarT]: ... -@overload -def asarray_chkfinite( - a: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., -) -> NDArray[Any]: ... - -@overload -def piecewise( - x: _ArrayLike[_ScalarT], - condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], - funclist: Sequence[ - Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]] - | _ScalarT | object - ], - /, - *args: _Pss.args, - **kw: _Pss.kwargs, -) -> NDArray[_ScalarT]: ... 
+def asarray_chkfinite[ArrayT: np.ndarray](a: ArrayT, dtype: None = None, order: _OrderKACF = None) -> ArrayT: ... +@overload +def asarray_chkfinite[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT], dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> _Array[ShapeT, ScalarT]: ... +@overload +def asarray_chkfinite[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = None +) -> NDArray[ScalarT]: ... @overload -def piecewise( +def asarray_chkfinite[ScalarT: np.generic]( + a: object, dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> NDArray[ScalarT]: ... +@overload +def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKACF = None) -> NDArray[Incomplete]: ... + +# NOTE: Contrary to the documentation, scalars are also accepted and treated as +# `[condlist]`. And even though the documentation says these should be boolean, in +# practice anything that `np.array(condlist, dtype=bool)` accepts will work, i.e. any +# array-like. +@overload +def piecewise[ShapeT: _Shape, ScalarT: np.generic, **Tss]( + x: _Array[ShapeT, ScalarT], + condlist: ArrayLike, + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> _Array[ShapeT, ScalarT]: ... +@overload +def piecewise[ScalarT: np.generic, **Tss]( + x: _ArrayLike[ScalarT], + condlist: ArrayLike, + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... +@overload +def piecewise[ScalarT: np.generic, **Tss]( x: ArrayLike, - condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], - funclist: Sequence[ - Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]] - | object - ], - /, - *args: _Pss.args, - **kw: _Pss.kwargs, -) -> NDArray[Any]: ... + condlist: ArrayLike, + funclist: _PiecewiseFunctions[ScalarT, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... + +# NOTE: condition is usually boolean, but anything with zero/non-zero semantics works +@overload +def extract[ScalarT: np.generic](condition: ArrayLike, arr: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bool]) -> _Array1D[np.bool]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[int]) -> _Array1D[np.int_]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[float]) -> _Array1D[np.float64]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[complex]) -> _Array1D[np.complex128]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bytes]) -> _Array1D[np.bytes_]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[str]) -> _Array1D[np.str_]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> _Array1D[Incomplete]: ... +# NOTE: unlike `extract`, passing non-boolean conditions for `condlist` will raise an +# error at runtime +@overload +def select[ArrayT: np.ndarray]( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[ArrayT], + default: _ScalarLike_co = 0, +) -> ArrayT: ... +@overload +def select[ScalarT: np.generic]( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[_ArrayLike[ScalarT]] | NDArray[ScalarT], + default: _ScalarLike_co = 0, +) -> NDArray[ScalarT]: ... +@overload def select( - condlist: Sequence[ArrayLike], + condlist: _SizedIterable[_ArrayLikeBool_co], choicelist: Sequence[ArrayLike], - default: ArrayLike = ..., -) -> NDArray[Any]: ... + default: _ScalarLike_co = 0, +) -> np.ndarray: ... 
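+# Doctest-style illustration of the two NOTEs above (not part of the stub):
+# `extract` accepts any zero/non-zero condition, while `select` rejects
+# non-boolean entries in `condlist` at runtime.
+#   >>> np.extract([0, 1, 2], [10, 20, 30])
+#   array([20, 30])
+#   >>> np.select([[0, 1]], [[10, 20]])
+#   Traceback (most recent call last):
+#       ...
+#   TypeError: invalid entry 0 in condlist: should be boolean ndarray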
+# keep roughly in sync with `ma.core.copy` @overload -def copy( - a: _ArrayT, - order: _OrderKACF, - subok: L[True], -) -> _ArrayT: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF, subok: L[True]) -> ArrayT: ... @overload -def copy( - a: _ArrayT, - order: _OrderKACF = ..., - *, - subok: L[True], -) -> _ArrayT: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> ArrayT: ... @overload -def copy( - a: _ArrayLike[_ScalarT], - order: _OrderKACF = ..., - subok: L[False] = ..., -) -> NDArray[_ScalarT]: ... +def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[ScalarT]: ... @overload -def copy( - a: ArrayLike, - order: _OrderKACF = ..., - subok: L[False] = ..., -) -> NDArray[Any]: ... +def copy(a: ArrayLike, order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[Incomplete]: ... +# +@overload # ?d, known inexact scalar-type +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _ArrayNoD[ScalarT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, + # `| Any` instead of ` | tuple` is returned to avoid several mypy_primer errors +) -> _Array1D[ScalarT] | Any: ... +@overload # 1d, known inexact scalar-type +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array1D[ScalarT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[ScalarT]: ... +@overload # 2d, known inexact scalar-type +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array2D[ScalarT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[ScalarT, ScalarT]: ... +@overload # 3d, known inexact scalar-type +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array3D[ScalarT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[ScalarT, ScalarT, ScalarT]: ... +@overload # ?d, datetime64 scalar-type +def gradient( + f: _ArrayNoD[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64] | tuple[NDArray[np.timedelta64], ...]: ... +@overload # 1d, datetime64 scalar-type +def gradient( + f: _Array1D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64]: ... +@overload # 2d, datetime64 scalar-type +def gradient( + f: _Array2D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.timedelta64, np.timedelta64]: ... +@overload # 3d, datetime64 scalar-type +def gradient( + f: _Array3D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.timedelta64, np.timedelta64, np.timedelta64]: ... +@overload # 1d float-like +def gradient( + f: _Seq1D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.float64]: ... +@overload # 2d float-like +def gradient( + f: _Seq2D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.float64, np.float64]: ... +@overload # 3d float-like +def gradient( + f: _Seq3D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.float64, np.float64, np.float64]: ... 
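+# Illustrative sketch (not part of the stub): `gradient` returns one array for a
+# single axis and a tuple of arrays otherwise, which is what the `_Mesh2`/`_Mesh3`
+# return types in the surrounding overloads encode.
+#   >>> np.gradient(np.array([1.0, 2.0, 4.0]))
+#   array([1. , 1.5, 2. ])
+#   >>> dy, dx = np.gradient(np.ones((3, 4)))  # 2-D input -> 2-tuple of 2-D arrays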
+@overload # 1d complex-like (the `list` avoids overlap with the float-like overload) +def gradient( + f: list[complex], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.complex128]: ... +@overload # 2d complex-like +def gradient( + f: _Seq1D[list[complex]], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.complex128, np.complex128]: ... +@overload # 3d complex-like +def gradient( + f: _Seq2D[list[complex]], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.complex128, np.complex128, np.complex128]: ... +@overload # fallback def gradient( f: ArrayLike, - *varargs: ArrayLike, - axis: _ShapeLike | None = ..., - edge_order: L[1, 2] = ..., -) -> Any: ... + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> Incomplete: ... -@overload -def diff( - a: _T, +# +@overload # n == 0; return input unchanged +def diff[T]( + a: T, n: L[0], - axis: SupportsIndex = ..., - prepend: ArrayLike = ..., - append: ArrayLike = ..., -) -> _T: ... -@overload + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., # = _NoValue + append: ArrayLike | _NoValueType = ..., # = _NoValue +) -> T: ... +@overload # known array-type +def diff[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> ArrayT: ... +@overload # known shape, datetime64 +def diff[ShapeT: _Shape]( + a: _Array[ShapeT, np.datetime64], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array[ShapeT, np.timedelta64]: ... +@overload # unknown shape, known scalar-type +def diff[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # unknown shape, datetime64 +def diff( + a: _ArrayLike[np.datetime64], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[np.timedelta64]: ... +@overload # 1d int +def diff( + a: _Seq1D[int], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.int_]: ... +@overload # 2d int +def diff( + a: _Seq2D[int], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.int_]: ... +@overload # 1d float (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[float], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.float64]: ... +@overload # 2d float +def diff( + a: _Seq1D[list[float]], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.float64]: ... +@overload # 1d complex (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[complex], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.complex128]: ... 
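+# Illustrative sketch (not part of the stub): differencing `datetime64` input
+# yields `timedelta64`, as the datetime64 overloads above encode.
+#   >>> d = np.array(["2024-01-01", "2024-01-03"], dtype="datetime64[D]")
+#   >>> np.diff(d)
+#   array([2], dtype='timedelta64[D]')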
+@overload # 2d complex +def diff( + a: _Seq1D[list[complex]], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.complex128]: ... +@overload # unknown shape, unknown scalar-type def diff( a: ArrayLike, - n: int = ..., - axis: SupportsIndex = ..., - prepend: ArrayLike = ..., - append: ArrayLike = ..., -) -> NDArray[Any]: ... + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[Incomplete]: ... +# @overload # float scalar def interp( x: _FloatLike_co, @@ -313,71 +759,89 @@ def interp( left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> float64: ... -@overload # float array +) -> np.float64: ... +@overload # complex scalar def interp( - x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> np.complex128: ... +@overload # float array +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[float64]: ... -@overload # float scalar or array +) -> _Array[ShapeT, np.float64]: ... +@overload # complex array +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # float sequence def interp( - x: _ArrayLikeFloat_co, + x: _Seq1D[_FloatLike_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[float64] | float64: ... -@overload # complex scalar +) -> _Array1D[np.float64]: ... +@overload # complex sequence def interp( - x: _FloatLike_co, + x: _Seq1D[_FloatLike_co], xp: _ArrayLikeFloat_co, - fp: _ArrayLike[complexfloating], + fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> complex128: ... -@overload # complex or float scalar +) -> _Array1D[np.complex128]: ... +@overload # float array-like def interp( - x: _FloatLike_co, + x: _SeqND[_FloatLike_co], xp: _ArrayLikeFloat_co, - fp: Sequence[complex | complexfloating], - left: _NumberLike_co | None = None, - right: _NumberLike_co | None = None, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> complex128 | float64: ... -@overload # complex array +) -> NDArray[np.float64]: ... +@overload # complex array-like def interp( - x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + x: _SeqND[_FloatLike_co], xp: _ArrayLikeFloat_co, - fp: _ArrayLike[complexfloating], + fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128]: ... 
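+# Illustrative sketch (not part of the stub): `interp` promotes to float64, or to
+# complex128 whenever `fp` is complex, per the surrounding overloads.
+#   >>> np.interp(1.5, [1, 2], [10.0, 20.0])  # scalar reprs assume NumPy >= 2.0
+#   np.float64(15.0)
+#   >>> np.interp(1.5, [1, 2], [10 + 0j, 20 + 0j])
+#   np.complex128(15+0j)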
-@overload # complex or float array +) -> NDArray[np.complex128]: ... +@overload # float scalar/array-like def interp( - x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, - fp: Sequence[complex | complexfloating], - left: _NumberLike_co | None = None, - right: _NumberLike_co | None = None, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128 | float64]: ... -@overload # complex scalar or array +) -> NDArray[np.float64] | np.float64: ... +@overload # complex scalar/array-like def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, - fp: _ArrayLike[complexfloating], + fp: _ArrayLike1D[np.complexfloating], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128] | complex128: ... -@overload # complex or float scalar or array +) -> NDArray[np.complex128] | np.complex128: ... +@overload # float/complex scalar/array-like def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, @@ -385,601 +849,1459 @@ def interp( left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128 | float64] | complex128 | float64: ... +) -> NDArray[np.complex128 | np.float64] | np.complex128 | np.float64: ... -@overload -def angle(z: _ComplexLike_co, deg: bool = ...) -> floating: ... -@overload -def angle(z: object_, deg: bool = ...) -> Any: ... -@overload -def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating]: ... -@overload -def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ... +# +@overload # 0d T: floating -> 0d T +def angle[FloatingT: np.floating](z: FloatingT, deg: bool = False) -> FloatingT: ... +@overload # 0d complex | float | ~integer -> 0d float64 +def angle(z: complex | _integer_co, deg: bool = False) -> np.float64: ... +@overload # 0d complex64 -> 0d float32 +def angle(z: np.complex64, deg: bool = False) -> np.float32: ... +@overload # 0d clongdouble -> 0d longdouble +def angle(z: np.clongdouble, deg: bool = False) -> np.longdouble: ... +@overload # T: nd floating -> T +def angle[ArrayFloatingT: NDArray[np.floating]](z: ArrayFloatingT, deg: bool = False) -> ArrayFloatingT: ... +@overload # nd T: complex128 | ~integer -> nd float64 +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[ShapeT, np.float64]: ... +@overload # nd T: complex64 -> nd float32 +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex64], deg: bool = False) -> _Array[ShapeT, np.float32]: ... +@overload # nd T: clongdouble -> nd longdouble +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.clongdouble], deg: bool = False) -> _Array[ShapeT, np.longdouble]: ... +@overload # 1d complex -> 1d float64 +def angle(z: _Seq1D[complex], deg: bool = False) -> _Array1D[np.float64]: ... +@overload # 2d complex -> 2d float64 +def angle(z: _Seq2D[complex], deg: bool = False) -> _Array2D[np.float64]: ... +@overload # 3d complex -> 3d float64 +def angle(z: _Seq3D[complex], deg: bool = False) -> _Array3D[np.float64]: ... +@overload # fallback +def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[np.floating] | Any: ... 
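+# Illustrative sketch (not part of the stub): `angle` maps each complex precision
+# to the matching real precision, e.g. complex64 -> float32.
+#   >>> np.angle(1j, deg=True)  # scalar repr assumes NumPy >= 2.0
+#   np.float64(90.0)
+#   >>> np.angle(np.complex64(1j)).dtype
+#   dtype('float32')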
-@overload +# +@overload # known array-type +def unwrap[ArrayT: NDArray[np.floating | np.object_]]( + p: ArrayT, + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> ArrayT: ... +@overload # known shape, float64 +def unwrap[ShapeT: _Shape]( + p: _Array[ShapeT, _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array[ShapeT, np.float64]: ... +@overload # 1d float64-like def unwrap( - p: _ArrayLikeFloat_co, - discont: float | None = ..., - axis: int = ..., + p: _Seq1D[float | _float64_co], + discont: float | None = None, + axis: int = -1, *, - period: float = ..., -) -> NDArray[floating]: ... -@overload + period: float = ..., # = τ +) -> _Array1D[np.float64]: ... +@overload # 2d float64-like def unwrap( - p: _ArrayLikeObject_co, - discont: float | None = ..., - axis: int = ..., + p: _Seq2D[float | _float64_co], + discont: float | None = None, + axis: int = -1, *, - period: float = ..., -) -> NDArray[object_]: ... - -def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ... - -def trim_zeros( - filt: _TrimZerosSequence[_T], - trim: L["f", "b", "fb", "bf"] = ..., -) -> _T: ... + period: float = ..., # = τ +) -> _Array2D[np.float64]: ... +@overload # 3d float64-like +def unwrap( + p: _Seq3D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array3D[np.float64]: ... +@overload # ?d, float64 +def unwrap( + p: _SeqND[float] | _ArrayLike[_float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> NDArray[np.float64]: ... +@overload # fallback +def unwrap( + p: _ArrayLikeFloat_co | _ArrayLikeObject_co, + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> np.ndarray: ... -@overload -def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... -@overload -def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... +# +@overload +def sort_complex[ArrayT: NDArray[np.complexfloating]](a: ArrayT) -> ArrayT: ... +@overload # complex64, shape known +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[ShapeT, np.complex64]: ... +@overload # complex64, shape unknown +def sort_complex(a: _ArrayLike[np.int8 | np.uint8 | np.int16 | np.uint16]) -> NDArray[np.complex64]: ... +@overload # complex128, shape known +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, _SortsToComplex128]) -> _Array[ShapeT, np.complex128]: ... +@overload # complex128, shape unknown +def sort_complex(a: _ArrayLike[_SortsToComplex128]) -> NDArray[np.complex128]: ... +@overload # clongdouble, shape known +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.longdouble]) -> _Array[ShapeT, np.clongdouble]: ... +@overload # clongdouble, shape unknown +def sort_complex(a: _ArrayLike[np.longdouble]) -> NDArray[np.clongdouble]: ... -def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... +# +def trim_zeros[T](filt: _TrimZerosSequence[T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> T: ... -@overload +# NOTE: keep in sync with `corrcoef` +@overload # ?d, known inexact scalar-type >=64 precision, y=. 
+def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: None = None, +) -> _Array2D[ScalarT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov[ScalarT: _InexactDouble]( + m: _ArrayNoD[ScalarT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... +@overload # 1d, known inexact scalar-type >=64 precision, y=None +def cov[ScalarT: _InexactDouble]( + m: _Array1D[ScalarT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array0D[ScalarT]: ... +@overload # nd, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... +@overload # nd, casts to float64, y= def cov( - m: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + m: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: None = ..., -) -> NDArray[floating]: ... -@overload + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None -> 0d or 2d def cov( - m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + m: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: None = ..., -) -> NDArray[complexfloating]: ... -@overload + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... 
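+# Illustrative sketch (not part of the stub): with `y=None` a 1-D input gives a
+# 0-D array (`_Array0D`), while passing `y` always gives a 2-D matrix.
+#   >>> np.cov([1.0, 2.0, 3.0])
+#   array(1.)
+#   >>> np.cov([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]).shape
+#   (2, 2)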
+@overload # 1d, casts to float64, y=None def cov( - m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + m: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> NDArray[_ScalarT]: ... -@overload + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array0D[np.float64]: ... +@overload # nd, casts to float64, y=None -> 0d or 2d def cov( - m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + m: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: DTypeLike, -) -> NDArray[Any]: ... - -# NOTE `bias` and `ddof` are deprecated and ignored -@overload -def corrcoef( - m: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co | None = None, + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) +def cov( + m: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: None = None, -) -> NDArray[floating]: ... -@overload -def corrcoef( - m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def cov( + m: list[complex], + y: None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: None = None, -) -> NDArray[complexfloating]: ... -@overload -def corrcoef( + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array0D[np.complex128]: ... +@overload # 2d complex, y=None -> 0d or 2d +def cov( + m: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> NDArray[np.complex128]: ... +@overload # 1d complex-like, y=None, dtype= +def cov[ScalarT: np.generic]( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT], +) -> _Array0D[ScalarT]: ... 
+@overload # nd complex-like, y=, dtype= +def cov[ScalarT: np.generic]( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + y: _ArrayLikeComplex_co, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> NDArray[_ScalarT]: ... -@overload -def corrcoef( + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... +@overload # nd complex-like, y=None, dtype= -> 0d or 2d +def cov[ScalarT: np.generic]( + m: _ArrayLikeComplex_co, + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT], +) -> NDArray[ScalarT]: ... +@overload # nd complex-like, y=, dtype=? +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def cov( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> _Array0D[Incomplete]: ... +@overload # nd complex-like, dtype=? +def cov( m: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, dtype: DTypeLike | None = None, -) -> NDArray[Any]: ... +) -> NDArray[Incomplete]: ... -def blackman(M: _FloatLike_co) -> NDArray[floating]: ... - -def bartlett(M: _FloatLike_co) -> NDArray[floating]: ... - -def hanning(M: _FloatLike_co) -> NDArray[floating]: ... - -def hamming(M: _FloatLike_co) -> NDArray[floating]: ... - -def i0(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... +# NOTE: If only `x` is given and the resulting array has shape (1,1), a bare scalar +# is returned instead of a 2D array. When y is given, a 2D array is always returned. +# This differs from `cov`, which returns 0-D arrays instead of scalars in such cases. +# NOTE: keep in sync with `cov` +@overload # ?d, known inexact scalar-type >=64 precision, y=. +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayNoD[ScalarT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... +@overload # 1d, known inexact scalar-type >=64 precision, y=None +def corrcoef[ScalarT: _InexactDouble]( + x: _Array1D[ScalarT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> ScalarT: ... 
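+# Illustrative sketch (not part of the stub): per the NOTE above, a (1, 1) result
+# degenerates to a bare scalar for `corrcoef`, unlike the 0-D array from `cov`.
+#   >>> np.corrcoef([1.0, 2.0, 3.0])  # scalar repr assumes NumPy >= 2.0
+#   np.float64(1.0)
+#   >>> np.cov([1.0, 2.0, 3.0])
+#   array(1.)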
+@overload # nd, known inexact scalar-type >=64 precision, y=None +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... +@overload # nd, casts to float64, y= +def corrcoef( + x: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None +def corrcoef( + x: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d, casts to float64, y=None +def corrcoef( + x: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> np.float64: ... +@overload # nd, casts to float64, y=None +def corrcoef( + x: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) +def corrcoef( + x: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def corrcoef( + x: list[complex], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> np.complex128: ... +@overload # 2d complex, y=None +def corrcoef( + x: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128] | np.complex128: ... +@overload # 1d complex-like, y=None, dtype= +def corrcoef[ScalarT: np.generic]( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT], +) -> ScalarT: ... +@overload # nd complex-like, y=, dtype= +def corrcoef[ScalarT: np.generic]( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... +@overload # nd complex-like, y=None, dtype= +def corrcoef[ScalarT: np.generic]( + x: _ArrayLikeComplex_co, + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT] | ScalarT: ... +@overload # nd complex-like, y=, dtype=? +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def corrcoef( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> Incomplete: ... +@overload # nd complex-like, dtype=? +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete] | Incomplete: ... -def kaiser( - M: _FloatLike_co, - beta: _FloatLike_co, -) -> NDArray[floating]: ... +# note that floating `M` are accepted, but their fractional part is ignored +def blackman(M: _FloatLike_co) -> _Array1D[np.float64]: ... 
+def bartlett(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hanning(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hamming(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def kaiser(M: _FloatLike_co, beta: _FloatLike_co) -> _Array1D[np.float64]: ... +# +@overload +def i0[ShapeT: _Shape](x: _Array[ShapeT, np.floating | np.integer]) -> _Array[ShapeT, np.float64]: ... @overload -def sinc(x: _FloatLike_co) -> floating: ... +def i0(x: _FloatLike_co) -> _Array0D[np.float64]: ... @overload -def sinc(x: _ComplexLike_co) -> complexfloating: ... +def i0(x: _Seq1D[_FloatLike_co]) -> _Array1D[np.float64]: ... @overload -def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... +def i0(x: _Seq2D[_FloatLike_co]) -> _Array2D[np.float64]: ... @overload -def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +def i0(x: _Seq3D[_FloatLike_co]) -> _Array3D[np.float64]: ... +@overload +def i0(x: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... +# @overload -def median( - a: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., -) -> floating: ... +def sinc[ScalarT: np.inexact](x: ScalarT) -> ScalarT: ... @overload -def median( - a: _ArrayLikeComplex_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., -) -> complexfloating: ... +def sinc(x: float | _float64_co) -> np.float64: ... @overload -def median( - a: _ArrayLikeTD64_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., -) -> timedelta64: ... +def sinc(x: complex) -> np.complex128 | Any: ... @overload -def median( - a: _ArrayLikeObject_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., -) -> Any: ... +def sinc[ArrayT: NDArray[np.inexact]](x: ArrayT) -> ArrayT: ... @overload -def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: bool = ..., -) -> Any: ... +def sinc[ShapeT: _Shape](x: _Array[ShapeT, _integer_co]) -> _Array[ShapeT, np.float64]: ... @overload -def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - out: _ArrayT, - overwrite_input: bool = ..., - keepdims: bool = ..., -) -> _ArrayT: ... +def sinc(x: _Seq1D[float]) -> _Array1D[np.float64]: ... +@overload +def sinc(x: _Seq2D[float]) -> _Array2D[np.float64]: ... +@overload +def sinc(x: _Seq3D[float]) -> _Array3D[np.float64]: ... @overload +def sinc(x: _SeqND[float]) -> NDArray[np.float64]: ... +@overload +def sinc(x: list[complex]) -> _Array1D[np.complex128]: ... +@overload +def sinc(x: _Seq1D[list[complex]]) -> _Array2D[np.complex128]: ... +@overload +def sinc(x: _Seq2D[list[complex]]) -> _Array3D[np.complex128]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> np.ndarray | Any: ... + +# NOTE: We assume that `axis` is only provided for >=1-D arrays because for <1-D arrays +# it has no effect, and would complicate the overloads significantly. +@overload # known scalar-type, keepdims=False (default) +def median[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> ScalarT: ... 
+@overload # float array-like, keepdims=False (default) def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., + a: _ArrayLikeInt_co | _SeqND[float] | float, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.float64: ... +@overload # complex array-like, keepdims=False (default) +def median( + a: _ListSeqND[complex], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128: ... +@overload # complex scalar, keepdims=False (default) +def median( + a: complex, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128 | Any: ... +@overload # known array-type, keepdims=True +def median[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, *, - out: _ArrayT, - overwrite_input: bool = ..., - keepdims: bool = ..., -) -> _ArrayT: ... - -_MethodKind = L[ - "inverted_cdf", - "averaged_inverted_cdf", - "closest_observation", - "interpolated_inverted_cdf", - "hazen", - "weibull", - "linear", - "median_unbiased", - "normal_unbiased", - "lower", - "higher", - "midpoint", - "nearest", -] + keepdims: L[True], +) -> ArrayT: ... +@overload # known scalar-type, keepdims=True +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[ScalarT]: ... +@overload # known scalar-type, axis= +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[ScalarT]: ... +@overload # float array-like, keepdims=True +def median( + a: _SeqND[float], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # float array-like, axis= +def median( + a: _SeqND[float], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[np.float64]: ... +@overload # complex array-like, keepdims=True +def median( + a: _ListSeqND[complex], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... +@overload # complex array-like, axis= +def median( + a: _ListSeqND[complex], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[np.complex128]: ... +@overload # out= (keyword) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + *, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... +@overload # out= (positional) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... +@overload # fallback +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> Incomplete: ... 
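+# Illustrative sketch (not part of the stub): `median` preserves inexact and
+# timedelta64 dtypes and promotes integer input to float64.
+#   >>> np.median([1, 3, 2])  # scalar reprs assume NumPy >= 2.0
+#   np.float64(2.0)
+#   >>> np.median(np.array([1, 3], dtype="timedelta64[s]"))
+#   np.timedelta64(2,'s')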
-@overload +# NOTE: keep in sync with `quantile` +@overload # inexact, scalar, axis=None +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> ScalarT: ... +@overload # inexact, scalar, axis= +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # inexact, scalar, keepdims=True +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # inexact, array, axis=None +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # inexact, array-like +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # float, scalar, axis=None def percentile( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., -) -> floating: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> np.float64: ... +@overload # float, scalar, axis= def percentile( - a: _ArrayLikeComplex_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., -) -> complexfloating: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... 
+@overload # float, scalar, keepdims=True def percentile( - a: _ArrayLikeTD64_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", *, - weights: _ArrayLikeFloat_co | None = ..., -) -> timedelta64: ... -@overload + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, array, axis=None +def percentile[ShapeT: _Shape]( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # float, array-like def percentile( - a: _ArrayLikeDT64_co, + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None +def percentile( + a: _ListSeqND[complex], q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., -) -> datetime64: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> np.complex128: ... +@overload # complex, scalar, axis= def percentile( - a: _ArrayLikeObject_co, + a: _ListSeqND[complex], q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., -) -> Any: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True def percentile( - a: _ArrayLikeFloat_co, - q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", *, - weights: _ArrayLikeFloat_co | None = ..., -) -> NDArray[floating]: ... -@overload + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, array, axis=None +def percentile[ShapeT: _Shape]( + a: _ListSeqND[complex], + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.complex128]: ... 
+@overload # complex, array-like def percentile( - a: _ArrayLikeComplex_co, - q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., -) -> NDArray[complexfloating]: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None def percentile( - a: _ArrayLikeTD64_co, - q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., -) -> NDArray[timedelta64]: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> Any: ... +@overload # object_, scalar, axis= def percentile( - a: _ArrayLikeDT64_co, - q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., -) -> NDArray[datetime64]: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, scalar, keepdims=True def percentile( a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, array, axis=None +def percentile[ShapeT: _Shape]( + a: _ArrayLikeObject_co, + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.object_]: ... +@overload # object_, array-like +def percentile( + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # out= (positional) +def percentile[ArrayT: np.ndarray]( + a: ArrayLike, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: _ShapeLike | None, + out: ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., -) -> NDArray[object_]: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> ArrayT: ... 
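+# Illustrative sketch (not part of the stub): a scalar `q` yields a scalar and an
+# array `q` yields an array whose shape follows `q`, as the overloads above encode.
+#   >>> np.percentile([1, 2, 3, 4], 50)  # scalar repr assumes NumPy >= 2.0
+#   np.float64(2.5)
+#   >>> np.percentile([1, 2, 3, 4], [25, 75])
+#   array([1.75, 3.25])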
+@overload # out= (keyword) +def percentile[ArrayT: np.ndarray]( + a: ArrayLike, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, +) -> ArrayT: ... +@overload # fallback def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... + +# NOTE: keep in sync with `percentile` +@overload # inexact, scalar, axis=None +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> ScalarT: ... +@overload # inexact, scalar, axis= +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # inexact, scalar, keepdims=True +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # inexact, array, axis=None +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # inexact, array-like +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # float, scalar, axis=None +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.float64: ... 
+@overload # float, scalar, axis= +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, scalar, keepdims=True +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, array, axis=None +def quantile[ShapeT: _Shape]( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # float, array-like +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.complex128: ... +@overload # complex, scalar, axis= +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, array, axis=None +def quantile[ShapeT: _Shape]( + a: _ListSeqND[complex], + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # complex, array-like +def quantile( + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... 
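+# Illustrative sketch (not part of the stub): `quantile` mirrors `percentile`,
+# with `q` given in [0, 1] rather than [0, 100].
+#   >>> np.quantile([1, 2, 3, 4], 0.5)  # scalar repr assumes NumPy >= 2.0
+#   np.float64(2.5)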
-@overload -def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, +@overload # object_, scalar, axis= +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, scalar, keepdims=True +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, array, axis=None +def quantile[ShapeT: _Shape]( + a: _ArrayLikeObject_co, + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.object_]: ... +@overload # object_, array-like +def quantile( + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # out= (keyword) +def quantile[ArrayT: np.ndarray]( + a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, - out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + out: ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., -) -> _ArrayT: ... -@overload -def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + weights: _ArrayLikeFloat_co | None = None, +) -> ArrayT: ... +@overload # out= (positional) +def quantile[ArrayT: np.ndarray]( + a: ArrayLike, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., - *, - out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., - weights: _ArrayLikeFloat_co | None = ..., -) -> _ArrayT: ... - -# NOTE: Not an alias, but they do have identical signatures -# (that we can reuse) -quantile = percentile - -_ScalarT_fm = TypeVar( - "_ScalarT_fm", - bound=floating | complexfloating | timedelta64, -) - -class _SupportsRMulFloat(Protocol[_T_co]): - def __rmul__(self, other: float, /) -> _T_co: ... + axis: _ShapeLike | None = None, + *, + out: ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, +) -> ArrayT: ... +@overload # fallback +def quantile( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... -@overload -def trapezoid( # type: ignore[overload-overlap] - y: Sequence[_FloatLike_co], - x: Sequence[_FloatLike_co] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., -) -> float64: ... 
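The `quantile` overloads above encode the same promotion rules: plain Python numbers fall back to `float64`, and an array-valued `q` contributes its shape to the result (the `_Array[ShapeT, ...]` overloads). A quick illustration:

```python
import numpy as np

data = [1, 2, 3, 4]                 # plain ints promote to float64

print(np.quantile(data, 0.5))       # 2.5, an np.float64 scalar

q = np.array([[0.25, 0.75]])
print(np.quantile(data, q).shape)   # (1, 2) -- q's shape carries through
```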
-@overload +# +@overload # ?d, known inexact/timedelta64 scalar-type +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayNoD[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[ScalarT] | ScalarT: ... +@overload # ?d, casts to float64 def trapezoid( - y: Sequence[_ComplexLike_co], - x: Sequence[_ComplexLike_co] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., -) -> complex128: ... + y: _ArrayNoD[_integer_co], + x: _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.float64] | np.float64: ... +@overload # strict 1d, known inexact/timedelta64 scalar-type +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array1D[ScalarT], + x: _Array1D[ScalarT] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> ScalarT: ... +@overload # strict 1d, casts to float64 +def trapezoid( + y: _Array1D[_float64_co] | _Seq1D[float], + x: _Array1D[_float64_co] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> np.float64: ... +@overload # strict 1d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: list[complex], + x: _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 1d, casts to complex128 +def trapezoid( + y: _Seq1D[complex], + x: list[complex], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, known inexact/timedelta64 scalar-type +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array2D[ScalarT], + x: _ArrayMax2D[ScalarT] | _Seq2D[float] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> ScalarT: ... +@overload # strict 2d, casts to float64 +def trapezoid( + y: _Array2D[_float64_co] | _Seq2D[float], + x: _ArrayMax2D[_float64_co] | _Seq2D[float] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> np.float64: ... +@overload # strict 2d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: _Seq1D[list[complex]], + x: _Seq2D[complex] | _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, casts to complex128 +def trapezoid( + y: _Seq2D[complex] | _Seq1D[complex], + x: _Seq1D[list[complex]], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayLike[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeInt_co | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[ScalarT] | ScalarT: ... @overload def trapezoid( - y: _ArrayLike[bool_ | integer], - x: _ArrayLike[bool_ | integer] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., -) -> float64 | NDArray[float64]: ... + y: _ArrayLike[_float64_co], + x: _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.float64] | np.float64: ... @overload -def trapezoid( # type: ignore[overload-overlap] - y: _ArrayLikeObject_co, - x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., -) -> float | NDArray[object_]: ... +def trapezoid( + y: _ArrayLike[np.complex128], + x: _ArrayLikeComplex_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128] | np.complex128: ... 
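For context on the `trapezoid` overloads: strict 1-D input reduces to a scalar, n-D input reduces along `axis`, and integer input is computed in `float64`. A short check of those rules:

```python
import numpy as np

y = np.array([0.0, 1.0, 4.0, 9.0])

print(np.trapezoid(y, dx=0.5))                 # 4.75, uniform spacing
print(np.trapezoid(y, [0.0, 1.0, 2.0, 3.0]))   # 9.5, explicit sample points

y2 = np.arange(8.0).reshape(2, 4)
print(np.trapezoid(y2, axis=-1).shape)         # (2,) -- 2-D input keeps one axis
```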
@overload def trapezoid( - y: _ArrayLike[_ScalarT_fm], - x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., -) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ... + y: _ArrayLikeComplex_co, + x: _ArrayLike[np.complex128], + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128] | np.complex128: ... @overload def trapezoid( - y: Sequence[_SupportsRMulFloat[_T]], - x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., -) -> _T: ... + y: _ArrayLikeObject_co, + x: _ArrayLikeObject_co | _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.object_] | Any: ... +@overload +def trapezoid[T]( + y: _Seq1D[_SupportsRMulFloat[T]], + x: _Seq1D[_SupportsRMulFloat[T] | T] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> T: ... @overload def trapezoid( - y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., -) -> ( - floating | complexfloating | timedelta64 - | NDArray[floating | complexfloating | timedelta64 | object_] -): ... - -@deprecated("Use 'trapezoid' instead") -def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = -1) -> generic | NDArray[generic]: ... + y: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + x: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> Incomplete: ... -@overload -def meshgrid( - *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., -) -> tuple[()]: ... -@overload -def meshgrid( - x1: _ArrayLike[_ScalarT], +# +@overload # 0d +def meshgrid(*, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy") -> tuple[()]: ... +@overload # 1d, known scalar-type +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., -) -> tuple[NDArray[_ScalarT]]: ... -@overload + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh1[ScalarT]: ... +@overload # 1d, unknown scalar-type def meshgrid( x1: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., -) -> tuple[NDArray[Any]]: ... -@overload -def meshgrid( - x1: _ArrayLike[_ScalarT1], - x2: _ArrayLike[_ScalarT2], - /, - *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., -) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... -@overload -def meshgrid( - x1: ArrayLike, - x2: _ArrayLike[_ScalarT], + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh1[Any]: ... +@overload # 2d, known scalar-types +def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic]( + x1: _ArrayLike[ScalarT1], + x2: _ArrayLike[ScalarT2], /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., -) -> tuple[NDArray[Any], NDArray[_ScalarT]]: ... -@overload -def meshgrid( - x1: _ArrayLike[_ScalarT], + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh2[ScalarT1, ScalarT2]: ... +@overload # 2d, known/unknown scalar-types +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], x2: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., -) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ... 
-@overload
-def meshgrid(
+    copy: bool = True,
+    sparse: bool = False,
+    indexing: _Indexing = "xy",
+) -> _Mesh2[ScalarT, Any]: ...
+@overload # 2d, unknown/known scalar-types
+def meshgrid[ScalarT: np.generic](
     x1: ArrayLike,
-    x2: ArrayLike,
+    x2: _ArrayLike[ScalarT],
     /,
     *,
-    copy: bool = ...,
-    sparse: bool = ...,
-    indexing: _MeshgridIdx = ...,
-) -> tuple[NDArray[Any], NDArray[Any]]: ...
-@overload
+    copy: bool = True,
+    sparse: bool = False,
+    indexing: _Indexing = "xy",
+) -> _Mesh2[Any, ScalarT]: ...
+@overload # 2d, unknown scalar-types
 def meshgrid(
     x1: ArrayLike,
     x2: ArrayLike,
-    x3: ArrayLike,
     /,
     *,
-    copy: bool = ...,
-    sparse: bool = ...,
-    indexing: _MeshgridIdx = ...,
-) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ...
-@overload
+    copy: bool = True,
+    sparse: bool = False,
+    indexing: _Indexing = "xy",
+) -> _Mesh2[Any, Any]: ...
+@overload # 3d, known scalar-types
+def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic, ScalarT3: np.generic](
+    x1: _ArrayLike[ScalarT1],
+    x2: _ArrayLike[ScalarT2],
+    x3: _ArrayLike[ScalarT3],
+    /,
+    *,
+    copy: bool = True,
+    sparse: bool = False,
+    indexing: _Indexing = "xy",
+) -> _Mesh3[ScalarT1, ScalarT2, ScalarT3]: ...
+@overload # 3d, unknown scalar-types
 def meshgrid(
     x1: ArrayLike,
     x2: ArrayLike,
     x3: ArrayLike,
-    x4: ArrayLike,
     /,
     *,
-    copy: bool = ...,
-    sparse: bool = ...,
-    indexing: _MeshgridIdx = ...,
-) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ...
-@overload
+    copy: bool = True,
+    sparse: bool = False,
+    indexing: _Indexing = "xy",
+) -> _Mesh3[Any, Any, Any]: ...
+@overload # ?d, known scalar-types
+def meshgrid[ScalarT: np.generic](
+    *xi: _ArrayLike[ScalarT],
+    copy: bool = True,
+    sparse: bool = False,
+    indexing: _Indexing = "xy",
+) -> tuple[NDArray[ScalarT], ...]: ...
+@overload # ?d, unknown scalar-types
 def meshgrid(
     *xi: ArrayLike,
-    copy: bool = ...,
-    sparse: bool = ...,
-    indexing: _MeshgridIdx = ...,
+    copy: bool = True,
+    sparse: bool = False,
+    indexing: _Indexing = "xy",
 ) -> tuple[NDArray[Any], ...]: ...
 
+#
+def place(arr: np.ndarray, mask: ConvertibleToInt | Sequence[ConvertibleToInt], vals: ArrayLike) -> None: ...
+
+# keep in sync with `insert`
+@overload # known scalar-type, axis=None (default)
+def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[ScalarT]: ...
+@overload # known array-type, axis specified
+def delete[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, axis: SupportsIndex) -> ArrayT: ...
+@overload # known scalar-type, axis specified
+def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[ScalarT]: ...
+@overload # unknown scalar-type, axis=None (default)
+def delete(arr: ArrayLike, obj: _IndexLike, axis: None = None) -> _Array1D[Any]: ...
+@overload # unknown scalar-type, axis specified
+def delete(arr: ArrayLike, obj: _IndexLike, axis: SupportsIndex) -> NDArray[Any]: ...
+
+# keep in sync with `delete`
+@overload # known scalar-type, axis=None (default)
+def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[ScalarT]: ...
+@overload # known array-type, axis specified
+def insert[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> ArrayT: ...
+@overload # known scalar-type, axis specified
+def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[ScalarT]: ...
+@overload # unknown scalar-type, axis=None (default)
+def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ...
+@overload # unknown scalar-type, axis specified
+def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[Any]: ...
+
+#
+@overload # known array type, axis specified
+def append[ArrayT: np.ndarray](arr: ArrayT, values: ArrayT, axis: SupportsIndex) -> ArrayT: ...
+@overload # 1d, known scalar type, axis specified
+def append[ScalarT: np.generic](arr: _Seq1D[ScalarT], values: _Seq1D[ScalarT], axis: SupportsIndex) -> _Array1D[ScalarT]: ...
+@overload # 2d, known scalar type, axis specified
+def append[ScalarT: np.generic](arr: _Seq2D[ScalarT], values: _Seq2D[ScalarT], axis: SupportsIndex) -> _Array2D[ScalarT]: ...
+@overload # 3d, known scalar type, axis specified
+def append[ScalarT: np.generic](arr: _Seq3D[ScalarT], values: _Seq3D[ScalarT], axis: SupportsIndex) -> _Array3D[ScalarT]: ...
+@overload # ?d, known scalar type, axis specified
+def append[ScalarT: np.generic](arr: _SeqND[ScalarT], values: _SeqND[ScalarT], axis: SupportsIndex) -> NDArray[ScalarT]: ...
+@overload # ?d, unknown scalar type, axis specified
+def append(arr: np.ndarray | _SeqND[_ScalarLike_co], values: _SeqND[_ScalarLike_co], axis: SupportsIndex) -> np.ndarray: ...
+@overload # known scalar type, axis=None
+def append[ScalarT: np.generic](arr: _ArrayLike[ScalarT], values: _ArrayLike[ScalarT], axis: None = None) -> _Array1D[ScalarT]: ...
+@overload # unknown scalar type, axis=None
+def append(arr: ArrayLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ...
+
+#
 @overload
-def delete(
-    arr: _ArrayLike[_ScalarT],
-    obj: slice | _ArrayLikeInt_co,
-    axis: SupportsIndex | None = ...,
-) -> NDArray[_ScalarT]: ...
+def digitize[ShapeT: _Shape](
+    x: _Array[ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False
+) -> _Array[ShapeT, np.int_]: ...
 @overload
-def delete(
-    arr: ArrayLike,
-    obj: slice | _ArrayLikeInt_co,
-    axis: SupportsIndex | None = ...,
-) -> NDArray[Any]: ...
-
+def digitize(x: _FloatLike_co, bins: _ArrayLikeFloat_co, right: bool = False) -> np.int_: ...
 @overload
-def insert(
-    arr: _ArrayLike[_ScalarT],
-    obj: slice | _ArrayLikeInt_co,
-    values: ArrayLike,
-    axis: SupportsIndex | None = ...,
-) -> NDArray[_ScalarT]: ...
+def digitize(x: _Seq1D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array1D[np.int_]: ...
 @overload
-def insert(
-    arr: ArrayLike,
-    obj: slice | _ArrayLikeInt_co,
-    values: ArrayLike,
-    axis: SupportsIndex | None = ...,
-) -> NDArray[Any]: ...
-
-def append(
-    arr: ArrayLike,
-    values: ArrayLike,
-    axis: SupportsIndex | None = ...,
-) -> NDArray[Any]: ...
-
+def digitize(x: _Seq2D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array2D[np.int_]: ...
 @overload
-def digitize(
-    x: _FloatLike_co,
-    bins: _ArrayLikeFloat_co,
-    right: bool = ...,
-) -> intp: ...
+def digitize(x: _Seq3D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array3D[np.int_]: ...
 @overload
-def digitize(
-    x: _ArrayLikeFloat_co,
-    bins: _ArrayLikeFloat_co,
-    right: bool = ...,
-) -> NDArray[intp]: ...
+def digitize(x: _ArrayLikeFloat_co, bins: _ArrayLikeFloat_co, right: bool = False) -> NDArray[np.int_] | Any: ...
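The `axis=None` vs. `axis=<int>` split in the `delete`/`insert` (and `append`) overloads matches the runtime flatten-first behavior, and `digitize` maps values to bin indices while preserving the input's dimensionality. For example:

```python
import numpy as np

a = np.arange(6).reshape(2, 3)

# axis=None flattens first, so the result is always 1-D (_Array1D)
print(np.delete(a, 0))                 # [1 2 3 4 5]
print(np.insert(a, 1, 99))             # [ 0 99  1  2  3  4  5]

# with an explicit axis, dimensionality is preserved
print(np.delete(a, 0, axis=1).shape)   # (2, 2)

# digitize: scalar in -> scalar out, sequence in -> array of indices
bins = np.array([0.0, 1.0, 2.5])
print(np.digitize(1.7, bins))          # 2
print(np.digitize([0.2, 3.0], bins))   # [1 3]
```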
diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 5e7afb5e397b..0c4c673ef063 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,21 +1,11 @@ from collections.abc import Sequence -from typing import ( - Any, - SupportsIndex, - TypeAlias, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, SupportsIndex -from numpy._typing import ( - ArrayLike, - NDArray, -) +from numpy._typing import ArrayLike, NDArray __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] -_BinKind: TypeAlias = L[ +type _BinKind = L[ "stone", "auto", "doane", @@ -28,23 +18,23 @@ _BinKind: TypeAlias = L[ def histogram_bin_edges( a: ArrayLike, - bins: _BinKind | SupportsIndex | ArrayLike = ..., - range: tuple[float, float] | None = ..., - weights: ArrayLike | None = ..., + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + weights: ArrayLike | None = None, ) -> NDArray[Any]: ... def histogram( a: ArrayLike, - bins: _BinKind | SupportsIndex | ArrayLike = ..., - range: tuple[float, float] | None = ..., - density: bool = ..., - weights: ArrayLike | None = ..., + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + density: bool | None = None, + weights: ArrayLike | None = None, ) -> tuple[NDArray[Any], NDArray[Any]]: ... def histogramdd( sample: ArrayLike, - bins: SupportsIndex | ArrayLike = ..., - range: Sequence[tuple[float, float]] = ..., - density: bool | None = ..., - weights: ArrayLike | None = ..., + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[tuple[float, float]] | None = None, + density: bool | None = None, + weights: ArrayLike | None = None, ) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 131bbae5d098..5ee60d0fceaf 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -1,7 +1,7 @@ import functools import math import sys -import warnings +from itertools import product import numpy as np import numpy._core.numeric as _nx @@ -12,7 +12,6 @@ from numpy._core.numerictypes import issubdtype from numpy._utils import set_module from numpy.lib._function_base_impl import diff -from numpy.lib.stride_tricks import as_strided array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -473,9 +472,9 @@ class RClass(AxisConcatenator): Optional character strings placed as the first element of the index expression can be used to change the output. The strings 'r' or 'c' result in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) - matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 - (column) matrix is produced. If the result is 2-D then both provide the - same matrix result. + matrix is produced. If the result is 1-D and 'c' is specified, then + an N x 1 (column) matrix is produced. + If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. 
A string of two comma-separated integers allows indication @@ -688,30 +687,13 @@ class ndindex: def __init__(self, *shape): if len(shape) == 1 and isinstance(shape[0], tuple): shape = shape[0] - x = as_strided(_nx.zeros(1), shape=shape, - strides=_nx.zeros_like(shape)) - self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], - order='C') + if min(shape, default=0) < 0: + raise ValueError("negative dimensions are not allowed") + self._iter = product(*map(range, shape)) def __iter__(self): return self - def ndincr(self): - """ - Increment the multi-dimensional index by one. - - This method is for backward compatibility only: do not use. - - .. deprecated:: 1.20.0 - This method has been advised against since numpy 1.8.0, but only - started emitting DeprecationWarning as of this version. - """ - # NumPy 1.20.0, 2020-09-08 - warnings.warn( - "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead", - DeprecationWarning, stacklevel=2) - next(self) - def __next__(self): """ Standard iterator method, updates the index and returns the index @@ -724,8 +706,7 @@ def __next__(self): iteration. """ - next(self._it) - return self._it.multi_index + return next(self._iter) # You can do all this with slice() plus a few special objects, @@ -996,7 +977,7 @@ def diag_indices(n, ndim=2): And use it to set the diagonal of an array of zeros to 1: - >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a = np.zeros((2, 2, 2), dtype=np.int_) >>> a[d3] = 1 >>> a array([[[1, 0], diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 7ac2b3a093e0..97930196ecfd 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,20 +1,32 @@ +from _typeshed import Incomplete, SupportsLenAndGetItem from collections.abc import Sequence -from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload -from typing import Literal as L - -from _typeshed import Incomplete -from typing_extensions import TypeVar, deprecated +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + Self, + SupportsIndex, + final, + overload, +) +from typing_extensions import TypeVar import numpy as np +from numpy import _CastingKind from numpy._core.multiarray import ravel_multi_index, unravel_index from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, _AnyShape, + _ArrayLike, + _DTypeLike, _FiniteNestedSequence, + _HasDType, _NestedSequence, _SupportsArray, - _SupportsDType, ) __all__ = [ # noqa: RUF022 @@ -36,14 +48,8 @@ __all__ = [ # noqa: RUF022 ### -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...]) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) _BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) - _AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) _MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) _NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) @@ -51,23 +57,26 @@ _Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) ### -class ndenumerate(Generic[_ScalarT_co]): +class ndenumerate(Generic[_ScalarT_co]): # noqa: UP046 @overload - def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> 
ndenumerate[_ScalarT]: ... + def __init__[ScalarT: np.generic]( + self: ndenumerate[ScalarT], + arr: _FiniteNestedSequence[_SupportsArray[np.dtype[ScalarT]]], + ) -> None: ... @overload - def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ... + def __init__(self: ndenumerate[np.str_], arr: str | _NestedSequence[str]) -> None: ... @overload - def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ... + def __init__(self: ndenumerate[np.bytes_], arr: bytes | _NestedSequence[bytes]) -> None: ... @overload - def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ... + def __init__(self: ndenumerate[np.bool], arr: bool | _NestedSequence[bool]) -> None: ... @overload - def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ... + def __init__(self: ndenumerate[np.intp], arr: int | _NestedSequence[int]) -> None: ... @overload - def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ... + def __init__(self: ndenumerate[np.float64], arr: float | _NestedSequence[float]) -> None: ... @overload - def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ... + def __init__(self: ndenumerate[np.complex128], arr: complex | _NestedSequence[complex]) -> None: ... @overload - def __new__(cls, arr: object) -> ndenumerate[Any]: ... + def __init__(self: ndenumerate[Incomplete], arr: object) -> None: ... # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) @overload @@ -93,13 +102,11 @@ class ndindex: def __iter__(self) -> Self: ... def __next__(self) -> _AnyShape: ... - # - @deprecated("Deprecated since 1.20.0.") - def ndincr(self, /) -> None: ... - class nd_grid(Generic[_BoolT_co]): + __slots__ = ("sparse",) + sparse: _BoolT_co - def __init__(self, sparse: _BoolT_co = ...) -> None: ... + def __init__(self, sparse: _BoolT_co = ...) -> None: ... # stubdefaulter: ignore[missing-default] @overload def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ... @overload @@ -107,10 +114,14 @@ class nd_grid(Generic[_BoolT_co]): @final class MGridClass(nd_grid[L[False]]): + __slots__ = () + def __init__(self) -> None: ... @final class OGridClass(nd_grid[L[True]]): + __slots__ = () + def __init__(self) -> None: ... class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]): @@ -123,48 +134,105 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] ndmin: _NDMinT_co trans1d: _Trans1DT_co - # + # NOTE: mypy does not understand that these default values are the same as the + # TypeVar defaults. Since the workaround would require us to write 16 overloads, + # we ignore the assignment type errors here. def __init__( self, /, - axis: _AxisT_co = ..., - matrix: _MatrixT_co = ..., - ndmin: _NDMinT_co = ..., - trans1d: _Trans1DT_co = ..., + axis: _AxisT_co = 0, # type: ignore[assignment] + matrix: _MatrixT_co = False, # type: ignore[assignment] + ndmin: _NDMinT_co = 1, # type: ignore[assignment] + trans1d: _Trans1DT_co = -1, # type: ignore[assignment] ) -> None: ... # TODO(jorenham): annotate this def __getitem__(self, key: Incomplete, /) -> Incomplete: ... def __len__(self, /) -> L[0]: ... - # + # Keep in sync with _core.multiarray.concatenate @staticmethod @overload - def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ... 
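For context on the `itertools.product` rewrite of `ndindex` above: it yields index tuples in the same C order as the old `nditer`-based version, including the zero-dimensional edge case. A quick equivalence check:

```python
import numpy as np
from itertools import product

print(list(np.ndindex(2, 3)))             # [(0, 0), (0, 1), ..., (1, 2)]
print(list(product(range(2), range(3))))  # identical sequence

print(list(np.ndindex()))                 # [()] -- one empty index tuple

# ndenumerate pairs each index tuple with its value
for idx, val in np.ndenumerate(np.array([[1, 2], [3, 4]])):
    print(idx, val)                       # (0, 0) 1 ... (1, 1) 4
```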
+ def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[ScalarT]: ... @staticmethod @overload - def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Incomplete]: ... + def concatenate[ScalarT: np.generic]( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: _DTypeLike[ScalarT], + casting: _CastingKind | None = "same_kind", + ) -> NDArray[ScalarT]: ... + @staticmethod + @overload + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[Incomplete]: ... + @staticmethod + @overload + def concatenate[OutT: np.ndarray]( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + *, + out: OutT, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> OutT: ... + @staticmethod + @overload + def concatenate[OutT: np.ndarray]( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None, + out: OutT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> OutT: ... @final class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): + __slots__ = () + def __init__(self, /) -> None: ... @final class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]): + __slots__ = () + def __init__(self, /) -> None: ... class IndexExpression(Generic[_BoolT_co]): + __slots__ = ("maketuple",) + maketuple: _BoolT_co def __init__(self, maketuple: _BoolT_co) -> None: ... @overload - def __getitem__(self, item: _TupleT) -> _TupleT: ... + def __getitem__[TupleT: tuple[Any, ...]](self, item: TupleT) -> TupleT: ... @overload - def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ... + def __getitem__[T](self: IndexExpression[L[True]], item: T) -> tuple[T]: ... @overload - def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... + def __getitem__[T](self: IndexExpression[L[False]], item: T) -> T: ... @overload -def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... +def ix_[DTypeT: np.dtype](*args: _FiniteNestedSequence[_HasDType[DTypeT]]) -> tuple[np.ndarray[_AnyShape, DTypeT], ...]: ... @overload def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... @overload @@ -179,10 +247,10 @@ def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ... def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... # -def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ... +def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = False) -> None: ... # -def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ... +def diag_indices(n: int, ndim: int = 2) -> tuple[NDArray[np.intp], ...]: ... def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... 
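A short illustration of the index helpers typed above: `meshgrid` returns broadcast coordinate grids, `ix_` builds an open (broadcastable) mesh, and `diag_indices` gives `intp` index arrays, which is what makes the dtype-preserving overloads useful:

```python
import numpy as np

a = np.arange(16).reshape(4, 4)

X, Y = np.meshgrid([0, 1, 2], [10, 20])   # default indexing="xy"
print(X.shape, Y.shape)                   # (2, 3) (2, 3)

rows, cols = np.ix_([0, 2], [1, 3])
print(rows.shape, cols.shape)             # (2, 1) (1, 2) -- open mesh
print(a[rows, cols])                      # [[ 1  3]
                                          #  [ 9 11]]

i, j = np.diag_indices(4)
print(a[i, j])                            # [ 0  5 10 15]
```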
# diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 21cfc3b19503..7baca9c78045 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -5,7 +5,6 @@ from typing import ( Final, Literal, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -13,11 +12,10 @@ from typing import ( import numpy as np import numpy.typing as npt - -_T = TypeVar("_T") +from numpy._typing._dtype_like import _DTypeLikeNested @type_check_only -class _ValidationKwargs(TypedDict, total=False): +class _NameValidatorKwargs(TypedDict, total=False): excludelist: Iterable[str] | None deletechars: Iterable[str] | None case_sensitive: Literal["upper", "lower"] | bool | None @@ -25,7 +23,7 @@ class _ValidationKwargs(TypedDict, total=False): ### -__docformat__: Final[str] = "restructuredtext en" +__docformat__: Final = "restructuredtext en" class ConverterError(Exception): ... class ConverterLockError(ConverterError): ... @@ -45,11 +43,11 @@ class LineSplitter: encoding: str | None = None, ) -> None: ... def __call__(self, /, line: str | bytes) -> list[str]: ... - def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... + def autostrip[T](self, /, method: Callable[[T], Iterable[str]]) -> Callable[[T], list[str]]: ... class NameValidator: - defaultexcludelist: ClassVar[Sequence[str]] - defaultdeletechars: ClassVar[Sequence[str]] + defaultexcludelist: ClassVar[Sequence[str]] = ... + defaultdeletechars: ClassVar[frozenset[str]] = ... excludelist: list[str] deletechars: set[str] case_converter: Callable[[str], str] @@ -98,17 +96,18 @@ class StringConverter: @classmethod def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... +def _decode_line(line: str | bytes, encoding: str | None = None) -> str: ... +def _is_string_like(obj: object) -> bool: ... +def _is_bytes_like(obj: object) -> bool: ... +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... @overload def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... @overload def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... - -# -def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... -def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... def easy_dtype( - ndtype: npt.DTypeLike, - names: Iterable[str] | None = None, + ndtype: str | Sequence[_DTypeLikeNested], + names: str | Sequence[str] | None = None, defaultfmt: str = "f%i", - **validationargs: Unpack[_ValidationKwargs], + **validationargs: Unpack[_NameValidatorKwargs], ) -> np.dtype[np.void]: ... diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 4a01490301c8..f030d74c5c11 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -717,7 +717,6 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, inf >>> np.nansum([1, np.nan, -np.inf]) -inf - >>> from numpy.testing import suppress_warnings >>> with np.errstate(invalid="ignore"): ... 
np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present np.float64(nan) @@ -1220,7 +1219,7 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu def _nanpercentile_dispatcher( a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -1235,7 +1234,6 @@ def nanpercentile( keepdims=np._NoValue, *, weights=None, - interpolation=None, ): """ Compute the qth percentile of the data along the specified axis, @@ -1314,11 +1312,6 @@ def nanpercentile( .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- percentile : scalar or ndarray @@ -1380,10 +1373,6 @@ def nanpercentile( The American Statistician, 50(4), pp. 361-365, 1996 """ - if interpolation is not None: - method = fnb._check_interpolation_as_method( - method, interpolation, "nanpercentile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") @@ -1408,8 +1397,7 @@ def nanpercentile( def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, - interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -1424,7 +1412,6 @@ def nanquantile( keepdims=np._NoValue, *, weights=None, - interpolation=None, ): """ Compute the qth quantile of the data along the specified axis, @@ -1501,11 +1488,6 @@ def nanquantile( .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- quantile : scalar or ndarray @@ -1566,11 +1548,6 @@ def nanquantile( The American Statistician, 50(4), pp. 361-365, 1996 """ - - if interpolation is not None: - method = fnb._check_interpolation_as_method( - method, interpolation, "nanquantile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") diff --git a/numpy/lib/_nanfunctions_impl.pyi b/numpy/lib/_nanfunctions_impl.pyi index f39800d58d07..fd5d277cbd7d 100644 --- a/numpy/lib/_nanfunctions_impl.pyi +++ b/numpy/lib/_nanfunctions_impl.pyi @@ -11,11 +11,7 @@ from numpy._core.fromnumeric import ( sum, var, ) -from numpy.lib._function_base_impl import ( - median, - percentile, - quantile, -) +from numpy.lib._function_base_impl import median, percentile, quantile __all__ = [ "nansum", diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index e588b8454b44..0e135917cd52 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -195,16 +195,13 @@ def __init__(self, fid, own_fid=False, allow_pickle=False, # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. 
_zip = zipfile_factory(fid) - self._files = _zip.namelist() - self.files = [] + _files = _zip.namelist() + self.files = [name.removesuffix(".npy") for name in _files] + self._files = dict(zip(self.files, _files)) + self._files.update(zip(_files, _files)) self.allow_pickle = allow_pickle self.max_header_size = max_header_size self.pickle_kwargs = pickle_kwargs - for x in self._files: - if x.endswith('.npy'): - self.files.append(x[:-4]) - else: - self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: @@ -240,37 +237,34 @@ def __len__(self): return len(self.files) def __getitem__(self, key): - # FIXME: This seems like it will copy strings around - # more than is strictly necessary. The zipfile - # will read the string and then - # the format.read_array will copy the string - # to another place in memory. - # It would be better if the zipfile could read - # (or at least uncompress) the data - # directly into the array memory. - member = False - if key in self._files: - member = True - elif key in self.files: - member = True - key += '.npy' - if member: - bytes = self.zip.open(key) - magic = bytes.read(len(format.MAGIC_PREFIX)) - bytes.close() - if magic == format.MAGIC_PREFIX: - bytes = self.zip.open(key) - return format.read_array(bytes, - allow_pickle=self.allow_pickle, - pickle_kwargs=self.pickle_kwargs, - max_header_size=self.max_header_size) - else: - return self.zip.read(key) + try: + key = self._files[key] + except KeyError: + raise KeyError(f"{key} is not a file in the archive") from None else: - raise KeyError(f"{key} is not a file in the archive") + with self.zip.open(key) as bytes: + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.seek(0) + if magic == format.MAGIC_PREFIX: + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. + return format.read_array( + bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size + ) + else: + return bytes.read() def __contains__(self, key): - return (key in self._files or key in self.files) + return (key in self._files) def __repr__(self): # Get filename or default to `object` @@ -503,12 +497,12 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, f"Failed to interpret file {file!r} as a pickle") from e -def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): +def _save_dispatcher(file, arr, allow_pickle=None): return (arr,) @array_function_dispatch(_save_dispatcher) -def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): +def save(file, arr, allow_pickle=True): """ Save an array to a binary file in NumPy ``.npy`` format. @@ -529,12 +523,6 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): require libraries that are not available, and not all pickled data is compatible between different versions of Python). Default: True - fix_imports : bool, optional - The `fix_imports` flag is deprecated and has no effect. - - .. deprecated:: 2.1 - This flag is ignored since NumPy 1.17 and was only needed to - support loading in Python 2 some files written in Python 3. 
     See Also
     --------
@@ -571,12 +559,6 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue):
     >>> print(a, b)
     # [1 2] [1 3]
     """
-    if fix_imports is not np._NoValue:
-        # Deprecated 2024-05-16, NumPy 2.1
-        warnings.warn(
-            "The 'fix_imports' flag is deprecated and has no effect. "
-            "(Deprecated in NumPy 2.1)",
-            DeprecationWarning, stacklevel=2)
     if hasattr(file, 'write'):
         file_ctx = contextlib.nullcontext(file)
     else:
@@ -587,8 +569,7 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue):
     with file_ctx as fid:
         arr = np.asanyarray(arr)
-        format.write_array(fid, arr, allow_pickle=allow_pickle,
-                           pickle_kwargs={'fix_imports': fix_imports})
+        format.write_array(fid, arr, allow_pickle=allow_pickle)
 
 
 def _savez_dispatcher(file, *args, allow_pickle=True, **kwds):
@@ -1362,7 +1343,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
     single escaped character:
 
     >>> s = StringIO('"Hello, my name is ""Monty""!"')
-    >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+    >>> np.loadtxt(s, dtype=np.str_, delimiter=",", quotechar='"')
     array('Hello, my name is "Monty"!', dtype='<U26')
diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi
--- a/numpy/lib/_npyio_impl.pyi
+++ b/numpy/lib/_npyio_impl.pyi
-class BagObj(Generic[_T_co]):
-    def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ...
-    def __getattribute__(self, key: str, /) -> _T_co: ...
+class BagObj[T]:
+    def __init__(self, /, obj: SupportsKeysAndGetItem[str, T]) -> None: ...
+    def __getattribute__(self, key: str, /) -> T: ...
     def __dir__(self) -> list[str]: ...
 
 class NpzFile(Mapping[str, NDArray[_ScalarT_co]]):
     _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5
 
-    zip: zipfile.ZipFile
-    fid: IO[str] | None
+    zip: zipfile.ZipFile | None = None
+    fid: IO[str] | None = None
     files: list[str]
     allow_pickle: bool
     pickle_kwargs: Mapping[str, Any] | None
@@ -92,6 +88,15 @@ class NpzFile(Mapping[str, NDArray[_ScalarT_co]]):
     def __iter__(self) -> Iterator[str]: ...
     @override
     def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ...
+
+    #
+    @override
+    @overload
+    def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ...
+    @overload
+    def get[T](self, key: str, default: NDArray[_ScalarT_co] | T, /) -> NDArray[_ScalarT_co] | T: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    #
     def close(self) -> None: ...
 
 # NOTE: Returns a `NpzFile` if file is a zip file;
@@ -106,19 +111,8 @@ def load(
     max_header_size: int = 10_000,
 ) -> Any: ...
 
-@overload
 def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ...
-@overload
-@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.")
-def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ...
-@overload
-@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.")
-def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ...
-
-#
 def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ...
-
-#
 def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ...
 
 # File-like objects only have to implement `__iter__` and,
@@ -141,9 +135,9 @@ def loadtxt(
     like: _SupportsArrayFunc | None = None,
 ) -> NDArray[np.float64]: ...
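The dict-based `_files` mapping introduced in `_npyio_impl.py` above keeps both the logical array name and the raw `.npy` member name addressable, now via a single lookup instead of two list scans. A round-trip check:

```python
import io
import numpy as np

buf = io.BytesIO()
np.savez(buf, x=np.arange(3))
buf.seek(0)

npz = np.load(buf)
print(npz.files)      # ['x']
print("x" in npz)     # True -- __contains__ hits the merged mapping
print(npz["x"])       # [0 1 2]
print(npz["x.npy"])   # the raw zip member name also resolves
npz.close()
```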
@overload -def loadtxt( +def loadtxt[ScalarT: np.generic]( fname: _FName, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], comments: str | Sequence[str] | None = "#", delimiter: str | None = None, converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, @@ -156,11 +150,11 @@ def loadtxt( *, quotechar: str | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def loadtxt( fname: _FName, - dtype: DTypeLike, + dtype: DTypeLike | None, comments: str | Sequence[str] | None = "#", delimiter: str | None = None, converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, @@ -188,17 +182,17 @@ def savetxt( ) -> None: ... @overload -def fromregex( +def fromregex[ScalarT: np.generic]( file: _FNameRead, regexp: str | bytes | Pattern[Any], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], encoding: str | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromregex( file: _FNameRead, regexp: str | bytes | Pattern[Any], - dtype: DTypeLike, + dtype: DTypeLike | None, encoding: str | None = None, ) -> NDArray[Any]: ... @@ -206,88 +200,88 @@ def fromregex( def genfromtxt( fname: _FName, dtype: None = None, - comments: str = ..., - delimiter: str | int | Iterable[int] | None = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: Mapping[int | str, Callable[[str], Any]] | None = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: Sequence[int] | None = ..., - names: L[True] | str | Collection[str] | None = ..., - excludelist: Sequence[str] | None = ..., - deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L["upper", "lower"] = ..., - defaultfmt: str = ..., - unpack: bool | None = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: int | None = ..., - encoding: str = ..., + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
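The `loadtxt` overloads map `dtype` onto the result type: the `float` default pins `NDArray[np.float64]`, while an explicit scalar dtype propagates through. For instance:

```python
import numpy as np
from io import StringIO

a = np.loadtxt(StringIO("1 2\n3 4"))                  # default dtype=float
print(a.dtype)                                        # float64

b = np.loadtxt(StringIO("1 2\n3 4"), dtype=np.int64)  # explicit scalar dtype
print(b.dtype)                                        # int64
```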
@overload -def genfromtxt( +def genfromtxt[ScalarT: np.generic]( fname: _FName, - dtype: _DTypeLike[_ScalarT], - comments: str = ..., - delimiter: str | int | Iterable[int] | None = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: Mapping[int | str, Callable[[str], Any]] | None = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: Sequence[int] | None = ..., - names: L[True] | str | Collection[str] | None = ..., - excludelist: Sequence[str] | None = ..., - deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L["upper", "lower"] = ..., - defaultfmt: str = ..., - unpack: bool | None = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: int | None = ..., - encoding: str = ..., + dtype: _DTypeLike[ScalarT], + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... @overload def genfromtxt( fname: _FName, - dtype: DTypeLike, - comments: str = ..., - delimiter: str | int | Iterable[int] | None = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: Mapping[int | str, Callable[[str], Any]] | None = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: Sequence[int] | None = ..., - names: L[True] | str | Collection[str] | None = ..., - excludelist: Sequence[str] | None = ..., - deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L["upper", "lower"] = ..., - defaultfmt: str = ..., - unpack: bool | None = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: int | None = ..., - encoding: str = ..., + dtype: DTypeLike | None, + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
@overload diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index a58ca76ec2b0..e9d2d5d23fc6 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -466,7 +466,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): A summary of the differences can be found in the :doc:`transition guide `. - Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + Fit a polynomial ``p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error in the order `deg`, `deg-1`, ... `0`. @@ -520,9 +520,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - residuals -- sum of squared residuals of the least squares fit - rank -- the effective rank of the scaled Vandermonde - coefficient matrix + coefficient matrix - singular_values -- singular values of the scaled Vandermonde - coefficient matrix + coefficient matrix - rcond -- value of `rcond`. For more details, see `numpy.linalg.lstsq`. diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index faf2f01e6a22..4899b868071c 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,15 +1,4 @@ -from typing import ( - Any, - NoReturn, - SupportsIndex, - SupportsInt, - TypeAlias, - TypeVar, - overload, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, NoReturn, SupportsIndex, SupportsInt, overload import numpy as np from numpy import ( @@ -35,16 +24,10 @@ from numpy._typing import ( _ArrayLikeUInt_co, ) -_T = TypeVar("_T") +type _2Tup[T] = tuple[T, T] +type _5Tup[T] = tuple[T, NDArray[float64], NDArray[int32], NDArray[float64], NDArray[float64]] -_2Tup: TypeAlias = tuple[_T, _T] -_5Tup: TypeAlias = tuple[ - _T, - NDArray[float64], - NDArray[int32], - NDArray[float64], - NDArray[float64], -] +### __all__ = [ "poly", @@ -69,47 +52,47 @@ def roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ... @overload def polyint( p: poly1d, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = None, ) -> poly1d: ... @overload def polyint( p: _ArrayLikeFloat_co, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeFloat_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeFloat_co | None = None, ) -> NDArray[floating]: ... @overload def polyint( p: _ArrayLikeComplex_co, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeComplex_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | None = None, ) -> NDArray[complexfloating]: ... @overload def polyint( p: _ArrayLikeObject_co, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeObject_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeObject_co | None = None, ) -> NDArray[object_]: ... @overload def polyder( p: poly1d, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> poly1d: ... @overload def polyder( p: _ArrayLikeFloat_co, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> NDArray[floating]: ... @overload def polyder( p: _ArrayLikeComplex_co, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> NDArray[complexfloating]: ... 
@overload def polyder( p: _ArrayLikeObject_co, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> NDArray[object_]: ... @overload @@ -117,60 +100,84 @@ def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[False] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: L[False] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False, ) -> NDArray[float64]: ... @overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[False] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: L[False] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False, ) -> NDArray[complex128]: ... @overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[False] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: L[True, "unscaled"] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], ) -> _2Tup[NDArray[float64]]: ... @overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[False] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: L[True, "unscaled"] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], ) -> _2Tup[NDArray[complex128]]: ... @overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[True] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: bool | L["unscaled"] = ..., + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[float64]]: ... @overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[complex128]]: ... +@overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[True] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: bool | L["unscaled"] = ..., + rcond: float | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[complex128]]: ... @overload diff --git a/numpy/lib/_scimath_impl.py b/numpy/lib/_scimath_impl.py index 8136a7d54515..b33f42b3d10d 100644 --- a/numpy/lib/_scimath_impl.py +++ b/numpy/lib/_scimath_impl.py @@ -628,9 +628,9 @@ def arctanh(x): >>> np.emath.arctanh(0.5) 0.5493061443340549 - >>> from numpy.testing import suppress_warnings - >>> with suppress_warnings() as sup: - ... sup.filter(RuntimeWarning) + >>> import warnings + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', RuntimeWarning) ... 
np.emath.arctanh(np.eye(2)) array([[inf, 0.], [ 0., inf]]) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 89b86c80964d..c9e0fd316e04 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -1,6 +1,7 @@ import functools import warnings +import numpy as np import numpy._core.numeric as _nx from numpy._core import atleast_3d, overrides, vstack from numpy._core._multiarray_umath import _array_converter @@ -171,15 +172,13 @@ def take_along_axis(arr, indices, axis=-1): if indices.ndim != 1: raise ValueError( 'when axis=None, `indices` must have a single dimension.') - arr = arr.flat - arr_shape = (len(arr),) # flatiter has no .shape + arr = np.array(arr.flat) axis = 0 else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - return arr[_make_along_axis_idx(arr_shape, indices, axis)] + return arr[_make_along_axis_idx(arr.shape, indices, axis)] def _put_along_axis_dispatcher(arr, indices, values, axis): @@ -263,15 +262,13 @@ def put_along_axis(arr, indices, values, axis): if indices.ndim != 1: raise ValueError( 'when axis=None, `indices` must have a single dimension.') - arr = arr.flat + arr = np.array(arr.flat) axis = 0 - arr_shape = (len(arr),) # flatiter has no .shape else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + arr[_make_along_axis_idx(arr.shape, indices, axis)] = values def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): @@ -653,11 +650,11 @@ def column_stack(tup): -------- >>> import numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1, 4], + [2, 5], + [3, 6]]) """ arrays = [] @@ -713,18 +710,18 @@ def dstack(tup): -------- >>> import numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.dstack((a,b)) - array([[[1, 2], - [2, 3], - [3, 4]]]) + array([[[1, 4], + [2, 5], + [3, 6]]]) >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) + >>> b = np.array([[4],[5],[6]]) >>> np.dstack((a,b)) - array([[[1, 2]], - [[2, 3]], - [[3, 4]]]) + array([[[1, 4]], + [[2, 5]], + [[3, 6]]]) """ arrs = atleast_3d(*tup) @@ -733,15 +730,6 @@ def dstack(tup): return _nx.concatenate(arrs, 2) -def _replace_zero_by_x_arrays(sub_arys): - for i in range(len(sub_arys)): - if _nx.ndim(sub_arys[i]) == 0: - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - return sub_arys - - def _array_split_dispatcher(ary, indices_or_sections, axis=None): return (ary, indices_or_sections) @@ -1113,7 +1101,7 @@ def kron(a, b): ----- The function assumes that the number of dimensions of `a` and `b` are the same, if necessary prepending the smallest with ones. - If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, + If ``a.shape = (r0,r1,...,rN)`` and ``b.shape = (s0,s1,...,sN)``, the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``. 
The elements are products of elements from `a` and `b`, organized explicitly by:: diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index a50d372bb97e..8037a01ac998 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,15 +1,5 @@ from collections.abc import Callable, Sequence -from typing import ( - Any, - Concatenate, - ParamSpec, - Protocol, - SupportsIndex, - TypeVar, - overload, - type_check_only, -) - +from typing import Any, Concatenate, Protocol, SupportsIndex, overload, type_check_only from typing_extensions import deprecated import numpy as np @@ -17,7 +7,6 @@ from numpy import ( _CastingKind, complexfloating, floating, - generic, integer, object_, signedinteger, @@ -56,9 +45,6 @@ __all__ = [ "put_along_axis", ] -_P = ParamSpec("_P") -_ScalarT = TypeVar("_ScalarT", bound=generic) - # Signature of `__array_wrap__` @type_check_only class _ArrayWrap(Protocol): @@ -77,52 +63,46 @@ class _SupportsArrayWrap(Protocol): ### -def take_along_axis( - arr: _ScalarT | NDArray[_ScalarT], +def take_along_axis[ScalarT: np.generic]( + arr: ScalarT | NDArray[ScalarT], indices: NDArray[integer], - axis: int | None = ..., -) -> NDArray[_ScalarT]: ... + axis: int | None = -1, +) -> NDArray[ScalarT]: ... -def put_along_axis( - arr: NDArray[_ScalarT], +def put_along_axis[ScalarT: np.generic]( + arr: NDArray[ScalarT], indices: NDArray[integer], values: ArrayLike, axis: int | None, ) -> None: ... @overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]], +def apply_along_axis[**Tss, ScalarT: np.generic]( + func1d: Callable[Concatenate[np.ndarray, Tss], _ArrayLike[ScalarT]], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, -) -> NDArray[_ScalarT]: ... + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> NDArray[ScalarT]: ... @overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], Any], +def apply_along_axis[**Tss]( + func1d: Callable[Concatenate[NDArray[Any], Tss], Any], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> NDArray[Any]: ... -def apply_over_axes( - func: Callable[[NDArray[Any], int], NDArray[_ScalarT]], +def apply_over_axes[ScalarT: np.generic]( + func: Callable[[np.ndarray, int], NDArray[ScalarT]], a: ArrayLike, axes: int | Sequence[int], -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def expand_dims( - a: _ArrayLike[_ScalarT], - axis: _ShapeLike, -) -> NDArray[_ScalarT]: ... +def expand_dims[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike) -> NDArray[ScalarT]: ... @overload -def expand_dims( - a: ArrayLike, - axis: _ShapeLike, -) -> NDArray[Any]: ... +def expand_dims(a: ArrayLike, axis: _ShapeLike) -> NDArray[Any]: ... # Deprecated in NumPy 2.0, 2023-08-18 @deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") @@ -133,48 +113,50 @@ def row_stack( casting: _CastingKind = "same_kind", ) -> NDArray[Any]: ... -# +# keep in sync with `numpy.ma.extras.column_stack` @overload -def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.dstack` @overload -def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... 
+def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... @overload -def array_split( - ary: _ArrayLike[_ScalarT], +def array_split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., -) -> list[NDArray[_ScalarT]]: ... + axis: SupportsIndex = 0, +) -> list[NDArray[ScalarT]]: ... @overload def array_split( ary: ArrayLike, indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[Any]]: ... @overload -def split( - ary: _ArrayLike[_ScalarT], +def split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., -) -> list[NDArray[_ScalarT]]: ... + axis: SupportsIndex = 0, +) -> list[NDArray[ScalarT]]: ... @overload def split( ary: ArrayLike, indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[Any]]: ... +# keep in sync with `numpy.ma.extras.hsplit` @overload -def hsplit( - ary: _ArrayLike[_ScalarT], +def hsplit[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def hsplit( ary: ArrayLike, @@ -182,10 +164,10 @@ def hsplit( ) -> list[NDArray[Any]]: ... @overload -def vsplit( - ary: _ArrayLike[_ScalarT], +def vsplit[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def vsplit( ary: ArrayLike, @@ -193,10 +175,10 @@ def vsplit( ) -> list[NDArray[Any]]: ... @overload -def dsplit( - ary: _ArrayLike[_ScalarT], +def dsplit[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def dsplit( ary: ArrayLike, @@ -209,13 +191,13 @@ def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... def get_array_wrap(*args: object) -> _ArrayWrap | None: ... @overload -def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... @overload -def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... # type: ignore[misc] +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... @overload -def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... # type: ignore[misc] +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... @overload -def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... # type: ignore[misc] +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... @overload def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload @@ -224,10 +206,10 @@ def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... @overload -def tile( - A: _ArrayLike[_ScalarT], +def tile[ScalarT: np.generic]( + A: _ArrayLike[ScalarT], reps: int | Sequence[int], -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
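A note on the pattern repeated throughout these stub hunks: the module-level `TypeVar`/`ParamSpec` declarations are replaced by PEP 695 inline type parameters, which require Python 3.12+. A minimal sketch of the equivalence, using a hypothetical `first_old`/`first_new` pair rather than any stub from this diff::

    from typing import TypeVar

    import numpy as np
    from numpy._typing import _ArrayLike  # private alias, as used in these stubs

    # Old spelling: one shared module-level type variable.
    _ScalarT = TypeVar("_ScalarT", bound=np.generic)

    def first_old(a: _ArrayLike[_ScalarT]) -> _ScalarT: ...

    # New spelling (PEP 695): the parameter is scoped to the function itself,
    # so no module-level declaration is needed.
    def first_new[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> ScalarT: ...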
@overload def tile( A: ArrayLike, diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index d4780783a638..98a79b325f66 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -173,6 +173,14 @@ def sliding_window_view(x, window_shape, axis=None, *, Notes ----- + .. warning:: + + This function creates views with overlapping memory. When + ``writeable=True``, writing to the view will modify the original array + and may affect multiple view positions. See the examples below and + :doc:`this guide ` + about the difference between copies and views. + For many applications using a sliding window view can be convenient, but potentially very slow. Often specialized solutions exist, for example: @@ -297,6 +305,31 @@ def sliding_window_view(x, window_shape, axis=None, *, >>> moving_average array([1., 2., 3., 4.]) + The two examples below demonstrate the effect of ``writeable=True``. + + Creating a view with the default ``writeable=False`` and then writing to + it raises an error. + + >>> v = sliding_window_view(x, 3) + >>> v[0,1] = 10 + Traceback (most recent call last): + ... + ValueError: assignment destination is read-only + + Creating a view with ``writeable=True`` and then writing to it changes + the original array and multiple view positions. + + >>> x = np.arange(6) # reset x for the second example + >>> v = sliding_window_view(x, 3, writeable=True) + >>> v[0,1] = 10 + >>> x + array([ 0, 10, 2, 3, 4, 5]) + >>> v + array([[ 0, 10, 2], + [10, 2, 3], + [ 2, 3, 4], + [ 3, 4, 5]]) + Note that a sliding window approach is often **not** optimal (see Notes). """ window_shape = (tuple(window_shape) @@ -416,14 +449,14 @@ def _broadcast_shape(*args): """ # use the old-iterator because np.nditer does not handle size 0 arrays # consistently - b = np.broadcast(*args[:32]) - # unfortunately, it cannot handle 32 or more arguments directly - for pos in range(32, len(args), 31): + b = np.broadcast(*args[:64]) + # unfortunately, it cannot handle 64 or more arguments directly + for pos in range(64, len(args), 63): # ironically, np.broadcast does not properly handle np.broadcast # objects (it treats them as scalars) # use broadcasting to avoid allocating the full array b = broadcast_to(0, b.shape) - b = np.broadcast(b, *args[pos:(pos + 31)]) + b = np.broadcast(b, *args[pos:(pos + 63)]) return b.shape @@ -534,7 +567,7 @@ def broadcast_arrays(*args, subok=False): [5, 5, 5]])] """ - # nditer is not used here to avoid the limit of 32 arrays. + # nditer is not used here to avoid the limit of 64 arrays. # Otherwise, something like the following one-liner would suffice: # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], # order='C').itviews diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index a7005d702d96..77b9d60b9d7f 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -1,74 +1,71 @@ from collections.abc import Iterable -from typing import Any, SupportsIndex, TypeVar, overload +from typing import Any, SupportsIndex, overload -from numpy import generic +import numpy as np from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike __all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - class DummyArray: __array_interface__: dict[str, Any] base: NDArray[Any] | None def __init__( self, interface: dict[str, Any], - base: NDArray[Any] | None = ..., + base: NDArray[Any] | None = None, ) -> None: ... 
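On the `_broadcast_shape` change above: the 32-to-64 bump presumably tracks the raised argument limit of `np.broadcast` in NumPy 2.x, and the chunk step of 63 (rather than 64) is needed because the partially broadcast result `b` itself occupies one argument slot in each follow-up `np.broadcast` call. A quick sanity check that well over 64 shapes still broadcast, via the public `np.broadcast_shapes`::

    >>> import numpy as np
    >>> np.broadcast_shapes(*([(2, 1)] * 70 + [(1, 3)]))
    (2, 3)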
@overload -def as_strided( - x: _ArrayLike[_ScalarT], - shape: Iterable[int] | None = ..., - strides: Iterable[int] | None = ..., - subok: bool = ..., - writeable: bool = ..., -) -> NDArray[_ScalarT]: ... +def as_strided[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, +) -> NDArray[ScalarT]: ... @overload def as_strided( x: ArrayLike, - shape: Iterable[int] | None = ..., - strides: Iterable[int] | None = ..., - subok: bool = ..., - writeable: bool = ..., + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, ) -> NDArray[Any]: ... @overload -def sliding_window_view( - x: _ArrayLike[_ScalarT], +def sliding_window_view[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], window_shape: int | Iterable[int], - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - subok: bool = ..., - writeable: bool = ..., -) -> NDArray[_ScalarT]: ... + subok: bool = False, + writeable: bool = False, +) -> NDArray[ScalarT]: ... @overload def sliding_window_view( x: ArrayLike, window_shape: int | Iterable[int], - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - subok: bool = ..., - writeable: bool = ..., + subok: bool = False, + writeable: bool = False, ) -> NDArray[Any]: ... @overload -def broadcast_to( - array: _ArrayLike[_ScalarT], +def broadcast_to[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], shape: int | Iterable[int], - subok: bool = ..., -) -> NDArray[_ScalarT]: ... + subok: bool = False, +) -> NDArray[ScalarT]: ... @overload def broadcast_to( array: ArrayLike, shape: int | Iterable[int], - subok: bool = ..., + subok: bool = False, ) -> NDArray[Any]: ... def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... +def broadcast_arrays(*args: ArrayLike, subok: bool = False) -> tuple[NDArray[Any], ...]: ... -def broadcast_arrays( - *args: ArrayLike, - subok: bool = ..., -) -> tuple[NDArray[Any], ...]: ... +# used internally by `lib._function_base_impl._parse_input_dimensions` +def _broadcast_shape(*args: ArrayLike) -> _AnyShape: ... diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index dc6a55886fdb..f92bfe9ce104 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -217,7 +217,7 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): Examples -------- >>> import numpy as np - >>> np.eye(2, dtype=int) + >>> np.eye(2, dtype=np.int_) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) @@ -418,7 +418,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): Examples -------- >>> import numpy as np - >>> np.tri(3, 5, 2, dtype=int) + >>> np.tri(3, 5, 2, dtype=np.int_) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]) @@ -926,7 +926,7 @@ def tril_indices(n, k=0, m=None): ------- inds : tuple of arrays The row and column indices, respectively. The row indices are sorted - in non-decreasing order, and the correspdonding column indices are + in non-decreasing order, and the corresponding column indices are strictly increasing for each row. See also @@ -1073,7 +1073,7 @@ def triu_indices(n, k=0, m=None): ------- inds : tuple, shape(2) of ndarrays, shape(`n`) The row and column indices, respectively. The row indices are sorted - in non-decreasing order, and the correspdonding column indices are + in non-decreasing order, and the corresponding column indices are strictly increasing for each row. 
See also diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 43df38ed5b06..63f5f4cdc9c0 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,39 +1,17 @@ +from _typeshed import Incomplete from collections.abc import Callable, Sequence -from typing import ( - Any, - TypeAlias, - TypeVar, - overload, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, Never, Protocol, overload, type_check_only import numpy as np -from numpy import ( - _OrderCF, - complex128, - complexfloating, - datetime64, - float64, - floating, - generic, - int_, - intp, - object_, - signedinteger, - timedelta64, -) +from numpy import _OrderCF from numpy._typing import ( ArrayLike, DTypeLike, NDArray, _ArrayLike, - _ArrayLikeComplex_co, - _ArrayLikeFloat_co, - _ArrayLikeInt_co, - _ArrayLikeObject_co, _DTypeLike, + _NumberLike_co, + _ScalarLike_co, _SupportsArray, _SupportsArrayFunc, ) @@ -58,381 +36,355 @@ __all__ = [ ### -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ComplexFloatingT = TypeVar("_ComplexFloatingT", bound=np.complexfloating) -_InexactT = TypeVar("_InexactT", bound=np.inexact) -_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co) +type _Int_co = np.integer | np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool + +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +# Workaround for mypy's and pyright's lack of compliance with the typing spec for +# overloads for gradual types. This works because only `Any` and `Never` are assignable +# to `Never`. +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[ScalarT]] + +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike2DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] # The returned arrays dtype must be compatible with `np.equal` -_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]] +type _MaskFunc[_T] = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] -_Int_co: TypeAlias = np.integer | np.bool -_Float_co: TypeAlias = np.floating | _Int_co -_Number_co: TypeAlias = np.number | np.bool +type _Indices2D = tuple[_Array1D[np.intp], _Array1D[np.intp]] +type _Histogram2D[ScalarT: np.generic] = tuple[_Array1D[np.float64], _Array1D[ScalarT], _Array1D[ScalarT]] -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] -_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] -_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] -_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] -_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +@type_check_only +class _HasShapeAndNDim(Protocol): + @property # TODO: require 2d shape once shape-typing has matured + def shape(self) -> tuple[int, ...]: ... 
+ @property + def ndim(self) -> int: ... ### +# keep in sync with `flipud` +@overload +def fliplr[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... @overload -def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def fliplr[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def fliplr(m: ArrayLike) -> NDArray[Any]: ... +# keep in sync with `fliplr` @overload -def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def flipud[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... +@overload +def flipud[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def flipud(m: ArrayLike) -> NDArray[Any]: ... +# @overload def eye( N: int, - M: int | None = ..., - k: int = ..., - dtype: None = ..., - order: _OrderCF = ..., + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] + order: _OrderCF = "C", *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> NDArray[float64]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.float64]: ... @overload -def eye( +def eye[ScalarT: np.generic]( N: int, M: int | None, k: int, - dtype: _DTypeLike[_ScalarT], - order: _OrderCF = ..., + dtype: _DTypeLike[ScalarT], + order: _OrderCF = "C", *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[ScalarT]: ... @overload -def eye( +def eye[ScalarT: np.generic]( N: int, - M: int | None = ..., - k: int = ..., + M: int | None = None, + k: int = 0, *, - dtype: _DTypeLike[_ScalarT], - order: _OrderCF = ..., - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + order: _OrderCF = "C", + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[ScalarT]: ... @overload def eye( N: int, - M: int | None = ..., - k: int = ..., - dtype: DTypeLike = ..., - order: _OrderCF = ..., + M: int | None = None, + k: int = 0, + dtype: DTypeLike | None = ..., # = float + order: _OrderCF = "C", *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[Incomplete]: ... +# +@overload +def diag[ScalarT: np.generic](v: _ArrayNoD[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> NDArray[ScalarT]: ... +@overload +def diag[ScalarT: np.generic](v: _Array2D[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _Array1D[ScalarT]: ... @overload -def diag(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _Array1D[ScalarT] | Sequence[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... @overload -def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Array1D[Incomplete]: ... +@overload +def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Array2D[Incomplete]: ... +@overload +def diag[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... +@overload +def diag(v: ArrayLike, k: int = 0) -> NDArray[Incomplete]: ... +# keep in sync with `numpy.ma.extras.diagflat` @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... 
@overload -def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def diagflat(v: ArrayLike, k: int = 0) -> _Array2D[Incomplete]: ... +# @overload def tri( N: int, - M: int | None = ..., - k: int = ..., - dtype: None = ..., + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] *, - like: _SupportsArrayFunc | None = ... -) -> NDArray[float64]: ... + like: _SupportsArrayFunc | None = None +) -> _Array2D[np.float64]: ... @overload -def tri( +def tri[ScalarT: np.generic]( N: int, M: int | None, k: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, - like: _SupportsArrayFunc | None = ... -) -> NDArray[_ScalarT]: ... + like: _SupportsArrayFunc | None = None +) -> _Array2D[ScalarT]: ... @overload -def tri( +def tri[ScalarT: np.generic]( N: int, - M: int | None = ..., - k: int = ..., + M: int | None = None, + k: int = 0, *, - dtype: _DTypeLike[_ScalarT], - like: _SupportsArrayFunc | None = ... -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], + like: _SupportsArrayFunc | None = None +) -> _Array2D[ScalarT]: ... @overload def tri( N: int, - M: int | None = ..., - k: int = ..., - dtype: DTypeLike = ..., + M: int | None = None, + k: int = 0, + dtype: DTypeLike | None = ..., # = float *, - like: _SupportsArrayFunc | None = ... -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = None +) -> _Array2D[Any]: ... +# keep in sync with `triu` @overload -def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def tril[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... +@overload +def tril[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# keep in sync with `tril` @overload -def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def triu[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... @overload -def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... - +def triu[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload -def vander( # type: ignore[misc] - x: _ArrayLikeInt_co, - N: int | None = ..., - increasing: bool = ..., -) -> NDArray[signedinteger]: ... -@overload -def vander( # type: ignore[misc] - x: _ArrayLikeFloat_co, - N: int | None = ..., - increasing: bool = ..., -) -> NDArray[floating]: ... -@overload -def vander( - x: _ArrayLikeComplex_co, - N: int | None = ..., - increasing: bool = ..., -) -> NDArray[complexfloating]: ... -@overload -def vander( - x: _ArrayLikeObject_co, - N: int | None = ..., - increasing: bool = ..., -) -> NDArray[object_]: ... +def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# we use `list` (invariant) instead of `Sequence` (covariant) to avoid overlap @overload -def histogram2d( - x: _ArrayLike1D[_ComplexFloatingT], - y: _ArrayLike1D[_ComplexFloatingT | _Float_co], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[_ComplexFloatingT], - NDArray[_ComplexFloatingT], -]: ... +def vander[ScalarT: np.number | np.object_](x: _ArrayLike1D[ScalarT], N: int | None = None, increasing: bool = False) -> _Array2D[ScalarT]: ... 
@overload -def histogram2d( - x: _ArrayLike1D[_ComplexFloatingT | _Float_co], - y: _ArrayLike1D[_ComplexFloatingT], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[_ComplexFloatingT], - NDArray[_ComplexFloatingT], -]: ... +def vander(x: _ArrayLike1D[np.bool] | list[int], N: int | None = None, increasing: bool = False) -> _Array2D[np.int_]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT | _Int_co], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[_InexactT], - NDArray[_InexactT], -]: ... +def vander(x: list[float], N: int | None = None, increasing: bool = False) -> _Array2D[np.float64]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT | _Int_co], - y: _ArrayLike1D[_InexactT], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[_InexactT], - NDArray[_InexactT], -]: ... +def vander(x: list[complex], N: int | None = None, increasing: bool = False) -> _Array2D[np.complex128]: ... +@overload # fallback +def vander(x: Sequence[_NumberLike_co], N: int | None = None, increasing: bool = False) -> _Array2D[Any]: ... + +# +@overload +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Float_co], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... +@overload +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT | _Float_co], + y: _ArrayLike1D[ScalarT], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... +@overload +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Int_co], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... +@overload +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT | _Int_co], + y: _ArrayLike1D[ScalarT], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], y: _ArrayLike1DInt_co | Sequence[float], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[float64], - NDArray[float64], -]: ... + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64]: ... 
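All of these overloads describe the same runtime result, which the new `_Histogram2D` alias names directly: a 3-tuple of the float64 counts plus the two bin-edge arrays, whose dtype the overloads refine. A small sketch of that shape contract::

    >>> import numpy as np
    >>> x = np.array([0.0, 0.5, 1.0, 1.5])
    >>> y = np.array([1.5, 1.0, 0.5, 0.0])
    >>> H, xedges, yedges = np.histogram2d(x, y, bins=2)
    >>> H.shape, xedges.shape, yedges.shape
    ((2, 2), (3,), (3,))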
@overload def histogram2d( x: Sequence[complex], y: Sequence[complex], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[complex128 | float64], - NDArray[complex128 | float64], -]: ... + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, - bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT], - NDArray[_NumberCoT], -]: ... -@overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT | _InexactT], - NDArray[_NumberCoT | _InexactT], -]: ... + bins: _ArrayLike1D[ScalarT] | Sequence[_ArrayLike1D[ScalarT]], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... +@overload +def histogram2d[ScalarT: np.inexact, BinsScalarT: _Number_co]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], + bins: Sequence[_ArrayLike1D[BinsScalarT] | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT | BinsScalarT]: ... +@overload +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT | Any]: ... +@overload +def histogram2d[ScalarT: _Number_co]( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: Sequence[_ArrayLike1D[ScalarT] | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | ScalarT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], y: _ArrayLike1DInt_co | Sequence[float], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT | float64], - NDArray[_NumberCoT | float64], -]: ... + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: Sequence[complex], y: Sequence[complex], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT | complex128 | float64], - NDArray[_NumberCoT | complex128 | float64], -]: ... 
+ bins: Sequence[_ArrayLike1D[ScalarT] | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | ScalarT]: ... @overload def histogram2d( - x: _ArrayLike1DNumber_co, - y: _ArrayLike1DNumber_co, - bins: Sequence[Sequence[bool]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[np.bool], - NDArray[np.bool], -]: ... + x: Sequence[complex], + y: Sequence[complex], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | Any]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[int]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[np.int_ | np.bool], - NDArray[np.int_ | np.bool], -]: ... + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.int_]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[float]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[np.float64 | np.int_ | np.bool], - NDArray[np.float64 | np.int_ | np.bool], -]: ... + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | Any]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[complex]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., -) -> tuple[ - NDArray[float64], - NDArray[np.complex128 | np.float64 | np.int_ | np.bool], - NDArray[np.complex128 | np.float64 | np.int_ | np.bool], -]: ... + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[_ArrayLike1DNumber_co | int] | int, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[Any]: ... # NOTE: we're assuming/demanding here the `mask_func` returns # an ndarray of shape `(n, n)`; otherwise there is the possibility # of the output tuple having more or less than 2 elements @overload -def mask_indices( - n: int, - mask_func: _MaskFunc[int], - k: int = ..., -) -> tuple[NDArray[intp], NDArray[intp]]: ... -@overload -def mask_indices( - n: int, - mask_func: _MaskFunc[_T], - k: _T, -) -> tuple[NDArray[intp], NDArray[intp]]: ... - -def tril_indices( - n: int, - k: int = ..., - m: int | None = ..., -) -> tuple[NDArray[int_], NDArray[int_]]: ... - -def tril_indices_from( - arr: NDArray[Any], - k: int = ..., -) -> tuple[NDArray[int_], NDArray[int_]]: ... +def mask_indices(n: int, mask_func: _MaskFunc[int], k: int = 0) -> _Indices2D: ... +@overload +def mask_indices[T](n: int, mask_func: _MaskFunc[T], k: T) -> _Indices2D: ... 
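The two `mask_indices` overloads above capture that `k` defaults to `0` when the mask function takes an `int`, and must otherwise match the type of the mask function's second parameter. The documented usage type-checks under either overload::

    >>> import numpy as np
    >>> a = np.arange(9).reshape(3, 3)
    >>> a[np.mask_indices(3, np.triu)]
    array([0, 1, 2, 4, 5, 8])
    >>> a[np.mask_indices(3, np.triu, 1)]
    array([1, 2, 5])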
-def triu_indices( - n: int, - k: int = ..., - m: int | None = ..., -) -> tuple[NDArray[int_], NDArray[int_]]: ... +# +def tril_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... +def triu_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... -def triu_indices_from( - arr: NDArray[Any], - k: int = ..., -) -> tuple[NDArray[int_], NDArray[int_]]: ... +# these will accept anything with `shape: tuple[int, int]` and `ndim: int` attributes +def tril_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... +def triu_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 977609caa299..37192043513f 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -240,26 +240,26 @@ def isreal(x): Examples -------- >>> import numpy as np - >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=np.complex128) >>> np.isreal(a) array([False, True, True, True, True, False]) The function does not work on string arrays. - >>> a = np.array([2j, "a"], dtype="U") + >>> a = np.array([2j, "a"], dtype=np.str_) >>> np.isreal(a) # Warns about non-elementwise comparison False - Returns True for all elements in input array of ``dtype=object`` even if + Returns True for all elements in input array of ``dtype=np.object_`` even if any of the elements is complex. - >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> a = np.array([1, "2", 3+4j], dtype=np.object_) >>> np.isreal(a) array([ True, True, True]) isreal should not be used with object arrays - >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> a = np.array([1+2j, 2+1j], dtype=np.object_) >>> np.isreal(a) array([ True, True]) @@ -398,15 +398,15 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. - nan : int, float, optional - Value to be used to fill NaN values. If no value is passed + nan : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill NaN values. If no values are passed then NaN values will be replaced with 0.0. - posinf : int, float, optional - Value to be used to fill positive infinity values. If no value is + posinf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill positive infinity values. If no values are passed then positive infinity values will be replaced with a very large number. - neginf : int, float, optional - Value to be used to fill negative infinity values. If no value is + neginf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill negative infinity values. If no values are passed then negative infinity values will be replaced with a very small (or negative) number. 
@@ -445,6 +445,12 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, 1.2800000e+02]) + >>> nan = np.array([11, 12, -9999, 13, 14]) + >>> posinf = np.array([33333333, 11, 12, 13, 14]) + >>> neginf = np.array([11, 33333333, 12, 13, 14]) + >>> np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, + 1.2800000e+02]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary -1.28000000e+002, 1.28000000e+002]) @@ -454,6 +460,11 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): 0.00000000e+000 +1.79769313e+308j]) >>> np.nan_to_num(y, nan=111111, posinf=222222) array([222222.+111111.j, 111111. +0.j, 111111.+222222.j]) + >>> nan = np.array([11, 12, 13]) + >>> posinf = np.array([21, 22, 23]) + >>> neginf = np.array([31, 32, 33]) + >>> np.nan_to_num(y, nan=nan, posinf=posinf, neginf=neginf) + array([21.+11.j, 12. +0.j, 13.+23.j]) """ x = _nx.array(x, subok=True, copy=copy) xtype = x.dtype.type diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 944015e423bb..dbef0ca87280 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,9 +1,6 @@ -from collections.abc import Container, Iterable -from typing import Any, Protocol, TypeAlias, overload, type_check_only -from typing import Literal as L - from _typeshed import Incomplete -from typing_extensions import TypeVar +from collections.abc import Container, Iterable +from typing import Any, Literal as L, Protocol, overload, type_check_only import numpy as np from numpy._typing import ( @@ -32,33 +29,28 @@ __all__ = [ "typename", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) -_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool) - -_FloatMax32: TypeAlias = np.float32 | np.float16 -_ComplexMax128: TypeAlias = np.complex128 | np.complex64 -_RealMax64: TypeAlias = np.float64 | np.float32 | np.float16 | np.integer -_Real: TypeAlias = np.floating | np.integer -_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16 -_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer +type _FloatMax32 = np.float32 | np.float16 +type _ComplexMax128 = np.complex128 | np.complex64 +type _RealMax64 = np.float64 | np.float32 | np.float16 | np.integer +type _Real = np.floating | np.integer +type _ToReal = _Real | np.bool +type _InexactMax32 = np.inexact[_32Bit] | np.float16 +type _NumberMax64 = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer @type_check_only -class _HasReal(Protocol[_T_co]): +class _HasReal[T](Protocol): @property - def real(self, /) -> _T_co: ... + def real(self, /) -> T: ... @type_check_only -class _HasImag(Protocol[_T_co]): +class _HasImag[T](Protocol): @property - def imag(self, /) -> _T_co: ... + def imag(self, /) -> T: ... @type_check_only -class _HasDType(Protocol[_ScalarT_co]): +class _HasDType[ScalarT: np.generic](Protocol): @property - def dtype(self, /) -> np.dtype[_ScalarT_co]: ... + def dtype(self, /) -> np.dtype[ScalarT]: ... 
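The `_HasReal`/`_HasImag` protocols above feed the first `real`/`imag` overloads below: any object exposing a `.real`/`.imag` property passes its property type straight through, before the array-like overloads take over. For example::

    >>> import numpy as np
    >>> np.real(1 + 2j)        # complex.real is a plain float
    1.0
    >>> np.imag(np.array([1 + 2j, 3 + 4j]))
    array([2., 4.])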
### @@ -66,17 +58,17 @@ def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[s # @overload -def real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap] +def real[T](val: _HasReal[T]) -> T: ... @overload -def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +def real[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ... @overload def real(val: ArrayLike) -> NDArray[Any]: ... # @overload -def imag(val: _HasImag[_T]) -> _T: ... # type: ignore[overload-overlap] +def imag[T](val: _HasImag[T]) -> T: ... @overload -def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +def imag[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ... @overload def imag(val: ArrayLike) -> NDArray[Any]: ... @@ -102,29 +94,29 @@ def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ... # @overload -def nan_to_num( - x: _ScalarT, +def nan_to_num[ScalarT: np.generic]( + x: ScalarT, copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def nan_to_num( - x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]], +def nan_to_num[ScalarT: np.generic]( + x: NDArray[ScalarT] | _NestedSequence[_ArrayLike[ScalarT]], copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def nan_to_num( - x: _SupportsArray[np.dtype[_ScalarT]], +def nan_to_num[ScalarT: np.generic]( + x: _SupportsArray[np.dtype[ScalarT]], copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload def nan_to_num( x: _NestedSequence[ArrayLike], @@ -144,101 +136,101 @@ def nan_to_num( # NOTE: The [overload-overlap] mypy error is a false positive @overload -def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... # type: ignore[overload-overlap] +def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... @overload def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... @overload def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ... @overload -def real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ... +def real_if_close[RealT: _ToReal](a: _ArrayLike[RealT], tol: float = 100) -> NDArray[RealT]: ... @overload def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... # @overload -def typename(char: L['S1']) -> L['character']: ... +def typename(char: L["S1"]) -> L["character"]: ... @overload -def typename(char: L['?']) -> L['bool']: ... +def typename(char: L["?"]) -> L["bool"]: ... @overload -def typename(char: L['b']) -> L['signed char']: ... +def typename(char: L["b"]) -> L["signed char"]: ... @overload -def typename(char: L['B']) -> L['unsigned char']: ... +def typename(char: L["B"]) -> L["unsigned char"]: ... @overload -def typename(char: L['h']) -> L['short']: ... +def typename(char: L["h"]) -> L["short"]: ... @overload -def typename(char: L['H']) -> L['unsigned short']: ... +def typename(char: L["H"]) -> L["unsigned short"]: ... @overload -def typename(char: L['i']) -> L['integer']: ... +def typename(char: L["i"]) -> L["integer"]: ... @overload -def typename(char: L['I']) -> L['unsigned integer']: ... 
+def typename(char: L["I"]) -> L["unsigned integer"]: ... @overload -def typename(char: L['l']) -> L['long integer']: ... +def typename(char: L["l"]) -> L["long integer"]: ... @overload -def typename(char: L['L']) -> L['unsigned long integer']: ... +def typename(char: L["L"]) -> L["unsigned long integer"]: ... @overload -def typename(char: L['q']) -> L['long long integer']: ... +def typename(char: L["q"]) -> L["long long integer"]: ... @overload -def typename(char: L['Q']) -> L['unsigned long long integer']: ... +def typename(char: L["Q"]) -> L["unsigned long long integer"]: ... @overload -def typename(char: L['f']) -> L['single precision']: ... +def typename(char: L["f"]) -> L["single precision"]: ... @overload -def typename(char: L['d']) -> L['double precision']: ... +def typename(char: L["d"]) -> L["double precision"]: ... @overload -def typename(char: L['g']) -> L['long precision']: ... +def typename(char: L["g"]) -> L["long precision"]: ... @overload -def typename(char: L['F']) -> L['complex single precision']: ... +def typename(char: L["F"]) -> L["complex single precision"]: ... @overload -def typename(char: L['D']) -> L['complex double precision']: ... +def typename(char: L["D"]) -> L["complex double precision"]: ... @overload -def typename(char: L['G']) -> L['complex long double precision']: ... +def typename(char: L["G"]) -> L["complex long double precision"]: ... @overload -def typename(char: L['S']) -> L['string']: ... +def typename(char: L["S"]) -> L["string"]: ... @overload -def typename(char: L['U']) -> L['unicode']: ... +def typename(char: L["U"]) -> L["unicode"]: ... @overload -def typename(char: L['V']) -> L['void']: ... +def typename(char: L["V"]) -> L["void"]: ... @overload -def typename(char: L['O']) -> L['object']: ... +def typename(char: L["O"]) -> L["object"]: ... # NOTE: The [overload-overlap] mypy errors are false positives @overload def common_type() -> type[np.float16]: ... @overload -def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... # type: ignore[overload-overlap] +def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... @overload -def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... # type: ignore[overload-overlap] +def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.float64 | np.integer], /, *ai: _HasDType[_RealMax64], ) -> type[np.float64]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.longdouble], /, *ai: _HasDType[_Real], ) -> type[np.longdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.complex64], /, *ai: _HasDType[_InexactMax32], ) -> type[np.complex64]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.complex128], /, *ai: _HasDType[_NumberMax64], ) -> type[np.complex128]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.clongdouble], /, *ai: _HasDType[np.number], ) -> type[np.clongdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[_FloatMax32], array1: _HasDType[np.float32], /, @@ -259,7 +251,7 @@ def common_type( *ai: _HasDType[_Real], ) -> type[np.longdouble]: ... 
@overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[_InexactMax32], array1: _HasDType[np.complex64], /, diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 695aab1b8922..569840697d81 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -56,15 +56,7 @@ def fix(x, out=None): array([ 2., 2., -2., -2.]) """ - # promote back to an array if flattened - res = nx.asanyarray(nx.ceil(x, out=out)) - res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) - - # when no out argument is passed and no subclasses are involved, flatten - # scalars - if out is None and type(res) is nx.ndarray: - res = res[()] - return res + return nx.trunc(x, out=out) @array_function_dispatch(_dispatcher, verify=False, module='numpy') diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index a673f05c010d..d48557a7b5d7 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -1,7 +1,7 @@ -from typing import Any, TypeVar, overload +from typing import overload +from typing_extensions import deprecated import numpy as np -from numpy import floating, object_ from numpy._typing import ( NDArray, _ArrayLikeFloat_co, @@ -11,57 +11,31 @@ from numpy._typing import ( __all__ = ["fix", "isneginf", "isposinf"] -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) - @overload -def fix( # type: ignore[misc] - x: _FloatLike_co, - out: None = ..., -) -> floating: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _FloatLike_co, out: None = None) -> np.floating: ... @overload -def fix( - x: _ArrayLikeFloat_co, - out: None = ..., -) -> NDArray[floating]: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.floating]: ... @overload -def fix( - x: _ArrayLikeObject_co, - out: None = ..., -) -> NDArray[object_]: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[np.object_]: ... @overload -def fix( - x: _ArrayLikeFloat_co | _ArrayLikeObject_co, - out: _ArrayT, -) -> _ArrayT: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix[ArrayT: np.ndarray](x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: ArrayT) -> ArrayT: ... +# @overload -def isposinf( # type: ignore[misc] - x: _FloatLike_co, - out: None = ..., -) -> np.bool: ... +def isposinf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: None = ..., -) -> NDArray[np.bool]: ... +def isposinf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: _ArrayT, -) -> _ArrayT: ... +def isposinf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... +# @overload -def isneginf( # type: ignore[misc] - x: _FloatLike_co, - out: None = ..., -) -> np.bool: ... +def isneginf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: None = ..., -) -> NDArray[np.bool]: ... +def isneginf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: _ArrayT, -) -> _ArrayT: ... 
+def isneginf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... diff --git a/numpy/lib/_user_array_impl.py b/numpy/lib/_user_array_impl.py index f3a6c0f518be..2465f5f70b99 100644 --- a/numpy/lib/_user_array_impl.py +++ b/numpy/lib/_user_array_impl.py @@ -53,6 +53,17 @@ class container: astype """ + def __init_subclass__(cls) -> None: + # Deprecated in NumPy 2.4, 2025-11-24 + import warnings + + warnings.warn( + "The numpy.lib.user_array.container class is deprecated and will be " + "removed in a future version.", + DeprecationWarning, + stacklevel=2, + ) + def __init__(self, data, dtype=None, copy=True): self.array = array(data, dtype, copy=copy) diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index c1c72b2320f1..4a6dfffbea92 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,8 +1,7 @@ -from types import EllipsisType -from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload - from _typeshed import Incomplete -from typing_extensions import TypeVar, deprecated, override +from types import EllipsisType +from typing import Any, Generic, Self, SupportsIndex, overload, override +from typing_extensions import TypeVar, deprecated import numpy as np import numpy.typing as npt @@ -12,33 +11,30 @@ from numpy._typing import ( _ArrayLikeBool_co, _ArrayLikeInt_co, _DTypeLike, + _Shape, ) ### -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) -_RealContainerT = TypeVar( - "_RealContainerT", - bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], -) -_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) +type _ArrayInt_co = npt.NDArray[np.integer | np.bool] -_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] +type _BoolContainer = container[Any, np.dtype[np.bool]] # type: ignore[deprecated] +type _IntegralContainer = container[Any, np.dtype[np.bool | np.integer | np.object_]] # type: ignore[deprecated] +type _RealContainer = container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]] # type: ignore[deprecated] +type _NumericContainer = container[Any, np.dtype[np.number | np.timedelta64 | np.object_]] # type: ignore[deprecated] -_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None -_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] -_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice -_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] +type _ToIndexSlice = slice | EllipsisType | _ArrayInt_co | None +type _ToIndexSlices = _ToIndexSlice | tuple[_ToIndexSlice, ...] +type _ToIndex = SupportsIndex | _ToIndexSlice +type _ToIndices = _ToIndex | tuple[_ToIndex, ...] 
### +# pyright: reportDeprecated = false +@deprecated("The numpy.lib.user_array.container class is deprecated and will be removed in a future version.") class container(Generic[_ShapeT_co, _DTypeT_co]): array: np.ndarray[_ShapeT_co, _DTypeT_co] @@ -51,19 +47,19 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): copy: bool = True, ) -> None: ... @overload - def __init__( - self: container[Any, np.dtype[_ScalarT]], + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], /, - data: _ArrayLike[_ScalarT], + data: _ArrayLike[ScalarT], dtype: None = None, copy: bool = True, ) -> None: ... @overload - def __init__( - self: container[Any, np.dtype[_ScalarT]], + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], /, data: npt.ArrayLike, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = True, ) -> None: ... @overload @@ -111,20 +107,28 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with np.ndarray @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap] + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex64]], / + ) -> container[ShapeT, np.dtype[np.float32]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex128]], / + ) -> container[ShapeT, np.dtype[np.float64]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex192]], / + ) -> container[ShapeT, np.dtype[np.float96]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex256]], / + ) -> container[ShapeT, np.dtype[np.float128]]: ... @overload - def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + def __abs__[ContainerT: _RealContainer](self: ContainerT, /) -> ContainerT: ... # - def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 - def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 - def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + def __neg__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 + def __pos__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 + def __invert__[ContainerT: _IntegralContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 # TODO(jorenham): complete these binary ops @@ -169,40 +173,34 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # @overload - def __and__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __and__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rand__ = __and__ @overload - def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iand__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... 
@overload def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... # @overload - def __xor__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __xor__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rxor__ = __xor__ @overload - def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ixor__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... # @overload - def __or__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __or__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __ror__ = __or__ @overload - def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ior__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... @@ -210,18 +208,18 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): @overload def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: np.dtype](self, /, t: DTypeT) -> np.ndarray[_ShapeT_co, DTypeT]: ... # @overload def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... @overload - def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... + def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( + self, a: np.ndarray[ShapeT, DTypeT], c: Any = ..., s: Any = ..., / + ) -> container[ShapeT, DTypeT]: ... # def copy(self, /) -> Self: ... - @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /) -> bytes: ... def tobytes(self, /) -> bytes: ... def byteswap(self, /) -> Self: ... - def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... + def astype[ScalarT: np.generic](self, /, typecode: _DTypeLike[ScalarT]) -> container[_ShapeT_co, np.dtype[ScalarT]]: ... diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 2e1ee23d7d58..164aa4ee3d8c 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -61,6 +61,11 @@ def show_runtime(): "not_found": features_not_found } }) + config_found.append({ + "ignore_floating_point_errors_in_matmul": + not np._core._multiarray_umath._blas_supports_fpe(None), + }) + try: from threadpoolctl import threadpool_info config_found.extend(threadpool_info()) diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 00ed47c9fb67..87fbc3aa5c4c 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,10 +1,18 @@ from _typeshed import SupportsWrite +from typing import LiteralString -from numpy._typing import DTypeLike +import numpy as np __all__ = ["get_include", "info", "show_runtime"] -def get_include() -> str: ... +def get_include() -> LiteralString: ... def show_runtime() -> None: ... 
-def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ... -def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ... +def info( + object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" +) -> None: ... +def drop_metadata[DTypeT: np.dtype](dtype: DTypeT, /) -> DTypeT: ... + +# used internally by `lib._function_base_impl._median` +def _median_nancheck[ScalarOrArrayT: np.generic | np.ndarray]( + data: np.ndarray, result: ScalarOrArrayT, axis: int +) -> ScalarOrArrayT: ... diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index f7a353868fd2..d70a61040a40 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -22,8 +22,7 @@ class NumpyVersion: - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - Development versions after a1: '1.8.0a1.dev-f1234afa', - '1.8.0b2.dev-f1234afa', - '1.8.1rc1.dev-f1234afa', etc. + '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. - Development versions (no git hash available): '1.8.0.dev-Unknown' Comparing needs to be done against a valid version string or other diff --git a/numpy/lib/array_utils.pyi b/numpy/lib/array_utils.pyi index 8adc3c5b22a6..4b9ebe334a1f 100644 --- a/numpy/lib/array_utils.pyi +++ b/numpy/lib/array_utils.pyi @@ -1,12 +1,6 @@ from ._array_utils_impl import ( __all__ as __all__, -) -from ._array_utils_impl import ( byte_bounds as byte_bounds, -) -from ._array_utils_impl import ( normalize_axis_index as normalize_axis_index, -) -from ._array_utils_impl import ( normalize_axis_tuple as normalize_axis_tuple, ) diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi index dd9470e1e6a3..c29e18fe0581 100644 --- a/numpy/lib/format.pyi +++ b/numpy/lib/format.pyi @@ -1,66 +1,24 @@ from ._format_impl import ( ARRAY_ALIGN as ARRAY_ALIGN, -) -from ._format_impl import ( BUFFER_SIZE as BUFFER_SIZE, -) -from ._format_impl import ( EXPECTED_KEYS as EXPECTED_KEYS, -) -from ._format_impl import ( GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS, -) -from ._format_impl import ( MAGIC_LEN as MAGIC_LEN, -) -from ._format_impl import ( MAGIC_PREFIX as MAGIC_PREFIX, -) -from ._format_impl import ( __all__ as __all__, -) -from ._format_impl import ( __doc__ as __doc__, -) -from ._format_impl import ( descr_to_dtype as descr_to_dtype, -) -from ._format_impl import ( drop_metadata as drop_metadata, -) -from ._format_impl import ( dtype_to_descr as dtype_to_descr, -) -from ._format_impl import ( header_data_from_array_1_0 as header_data_from_array_1_0, -) -from ._format_impl import ( isfileobj as isfileobj, -) -from ._format_impl import ( magic as magic, -) -from ._format_impl import ( open_memmap as open_memmap, -) -from ._format_impl import ( read_array as read_array, -) -from ._format_impl import ( read_array_header_1_0 as read_array_header_1_0, -) -from ._format_impl import ( read_array_header_2_0 as read_array_header_2_0, -) -from ._format_impl import ( read_magic as read_magic, -) -from ._format_impl import ( write_array as write_array, -) -from ._format_impl import ( write_array_header_1_0 as write_array_header_1_0, -) -from ._format_impl import ( write_array_header_2_0 as write_array_header_2_0, ) diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index f4a0f32a98da..816c79a669b9 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -34,7 +34,7 @@ def opt_func_info(func_name=None, signature=None): ... 
func_name="add|abs", signature="float64|complex64" ... ) >>> import json - >>> print(json.dumps(dict, indent=2)) + >>> print(json.dumps(dict, indent=2)) # may vary (architecture) { "absolute": { "dd": { @@ -65,8 +65,7 @@ def opt_func_info(func_name=None, signature=None): """ import re - from numpy._core._multiarray_umath import __cpu_targets_info__ as targets - from numpy._core._multiarray_umath import dtype + from numpy._core._multiarray_umath import __cpu_targets_info__ as targets, dtype if func_name is not None: func_pattern = re.compile(func_name) @@ -87,7 +86,7 @@ def opt_func_info(func_name=None, signature=None): sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars ): - matching_chars[chars] = targets + matching_chars[chars] = targets # noqa: PERF403 if matching_chars: matching_sigs[k] = matching_chars else: diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index 831bb34cfb55..cd02bf7f4a50 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -1,6 +1,7 @@ """ Mixin classes for custom array types that don't inherit from ndarray. """ +from numpy._core import umath as um __all__ = ['NDArrayOperatorsMixin'] @@ -69,7 +70,7 @@ class NDArrayOperatorsMixin: but that should support arithmetic and numpy universal functions like arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`. - As an trivial example, consider this implementation of an ``ArrayLike`` + As a trivial example, consider this implementation of an ``ArrayLike`` class that simply wraps a NumPy array and ensures that the result of any arithmetic operation is also an ``ArrayLike`` object: @@ -135,7 +136,6 @@ class that simply wraps a NumPy array and ensures that the result of any ArrayLike preserve a well-defined casting hierarchy. """ - from numpy._core import umath as um __slots__ = () # Like np.ndarray, this mixin class implements "Option 1" from the ufunc diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index 4f4801feac8f..e508a5cfd4bb 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -1,6 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any -from typing import Literal as L +from typing import Any, Literal as L, type_check_only from numpy import ufunc @@ -14,11 +13,15 @@ __all__ = ["NDArrayOperatorsMixin"] # As such, only little type safety can be provided here. class NDArrayOperatorsMixin(ABC): + __slots__ = () + + @type_check_only @abstractmethod def __array_ufunc__( self, ufunc: ufunc, method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + /, *inputs: Any, **kwargs: Any, ) -> Any: ... 
diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index 49fb4d1fc736..fd3ae8f5a287 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -1,9 +1,5 @@ from numpy.lib._npyio_impl import ( DataSource as DataSource, -) -from numpy.lib._npyio_impl import ( NpzFile as NpzFile, -) -from numpy.lib._npyio_impl import ( __doc__ as __doc__, ) diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi index 073642918af3..3ba63bdb91dd 100644 --- a/numpy/lib/recfunctions.pyi +++ b/numpy/lib/recfunctions.pyi @@ -1,12 +1,11 @@ -from collections.abc import Callable, Iterable, Mapping, Sequence -from typing import Any, Literal, TypeAlias, overload - from _typeshed import Incomplete -from typing_extensions import TypeVar +from collections.abc import Callable, Iterable, Mapping, Sequence +from typing import Any, Literal, overload import numpy as np import numpy.typing as npt -from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid +from numpy import _CastingKind +from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid, _Shape from numpy.ma.mrecords import MaskedRecords __all__ = [ @@ -33,26 +32,18 @@ __all__ = [ "unstructured_to_structured", ] -_T = TypeVar("_T") -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any]) -_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void]) -_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType) - -_OneOrMany: TypeAlias = _T | Iterable[_T] -_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T] +type _OneOrMany[T] = T | Iterable[T] +type _BuiltinSequence[T] = tuple[T, ...] | list[T] -_NestedNames: TypeAlias = tuple[str | _NestedNames, ...] -_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ -_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType +type _NestedNames = tuple[str | _NestedNames, ...] +type _NonVoid = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +type _NonVoidDType = np.dtype[_NonVoid] | np.dtypes.StringDType -_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"] +type _JoinType = Literal["inner", "outer", "leftouter"] ### -def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ... +def recursive_fill_fields[VoidArrayT: npt.NDArray[np.void]](input: npt.NDArray[np.void], output: VoidArrayT) -> VoidArrayT: ... # def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... @@ -60,7 +51,7 @@ def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... # @overload -def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +def flatten_descr[NonVoidDTypeT: _NonVoidDType](ndtype: NonVoidDTypeT) -> tuple[tuple[Literal[""], NonVoidDTypeT]]: ... @overload def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ... @@ -73,13 +64,13 @@ def get_fieldstructure( # @overload -def merge_arrays( - seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype], +def merge_arrays[ShapeT: _Shape]( + seqarrays: Sequence[np.ndarray[ShapeT, np.dtype]] | np.ndarray[ShapeT, np.dtype], fill_value: float = -1, flatten: bool = False, usemask: bool = False, asrecarray: bool = False, -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... 
@overload def merge_arrays( seqarrays: Sequence[npt.ArrayLike] | np.void, @@ -91,64 +82,64 @@ def merge_arrays( # @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool = True, asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool, asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool = True, *, asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... # @overload -def rename_fields( - base: MaskedRecords[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: MaskedRecords[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.recarray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.recarray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[False], asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -156,20 +147,20 @@ def append_fields( *, usemask: Literal[False], asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... 
@overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[False], asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -177,30 +168,30 @@ def append_fields( *, usemask: Literal[False], asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, fill_value: int = -1, usemask: Literal[True] = True, asrecarray: Literal[False] = False, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[True], asrecarray: Literal[True], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -208,46 +199,46 @@ def append_fields( usemask: Literal[True] = True, *, asrecarray: Literal[True], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... # -def rec_drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rec_drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... # -def rec_append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rec_append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... # TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, # e.g. using a `TypeVar` with constraints. # https://github.com/numpy/numtype/issues/92 @overload -def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +def repack_fields[DTypeT: np.dtype](a: DTypeT, align: bool = False, recurse: bool = False) -> DTypeT: ... @overload -def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... 
+def repack_fields[ScalarT: np.generic](a: ScalarT, align: bool = False, recurse: bool = False) -> ScalarT: ... @overload -def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... +def repack_fields[ArrayT: np.ndarray](a: ArrayT, align: bool = False, recurse: bool = False) -> ArrayT: ... # TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) @overload -def structured_to_unstructured( +def structured_to_unstructured[ScalarT: np.generic]( arr: npt.NDArray[np.void], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, - casting: np._CastingKind = "unsafe", -) -> npt.NDArray[_ScalarT]: ... + casting: _CastingKind = "unsafe", +) -> npt.NDArray[ScalarT]: ... @overload def structured_to_unstructured( arr: npt.NDArray[np.void], dtype: npt.DTypeLike | None = None, copy: bool = False, - casting: np._CastingKind = "unsafe", + casting: _CastingKind = "unsafe", ) -> npt.NDArray[Any]: ... # @@ -269,31 +260,41 @@ def unstructured_to_structured( copy: bool = False, casting: str = "unsafe", ) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None = None, + *, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... # -def apply_along_fields( - func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], - arr: np.ndarray[_ShapeT, np.dtype[np.void]], -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +def apply_along_fields[ShapeT: _Shape]( + func: Callable[[np.ndarray[ShapeT]], np.ndarray], + arr: np.ndarray[ShapeT, np.dtype[np.void]], +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... # -def require_fields( - array: np.ndarray[_ShapeT, np.dtype[np.void]], +def require_fields[ShapeT: _Shape]( + array: np.ndarray[ShapeT, np.dtype[np.void]], required_dtype: _DTypeLikeVoid, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # TODO(jorenham): Attempt shape-typing @overload -def stack_arrays( - arrays: _ArrayT, +def stack_arrays[ArrayT: np.ndarray]( + arrays: ArrayT, defaults: Mapping[str, object] | None = None, usemask: bool = True, asrecarray: bool = False, autoconvert: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... @overload def stack_arrays( arrays: Sequence[npt.NDArray[Any]], @@ -348,27 +349,27 @@ def stack_arrays( # @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None = None, ignoremask: bool = True, return_index: Literal[False] = False, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None, ignoremask: bool, return_index: Literal[True], -) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... 
@overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None = None, ignoremask: bool = True, *, return_index: Literal[True], -) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... # @overload diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi index 253235dfc576..ef2772a33a47 100644 --- a/numpy/lib/scimath.pyi +++ b/numpy/lib/scimath.pyi @@ -1,30 +1,12 @@ from ._scimath_impl import ( __all__ as __all__, -) -from ._scimath_impl import ( arccos as arccos, -) -from ._scimath_impl import ( arcsin as arcsin, -) -from ._scimath_impl import ( arctanh as arctanh, -) -from ._scimath_impl import ( log as log, -) -from ._scimath_impl import ( log2 as log2, -) -from ._scimath_impl import ( log10 as log10, -) -from ._scimath_impl import ( logn as logn, -) -from ._scimath_impl import ( power as power, -) -from ._scimath_impl import ( sqrt as sqrt, ) diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi index 42d8fe9ef43b..eb46f28ae5f4 100644 --- a/numpy/lib/stride_tricks.pyi +++ b/numpy/lib/stride_tricks.pyi @@ -1,6 +1,4 @@ from numpy.lib._stride_tricks_impl import ( as_strided as as_strided, -) -from numpy.lib._stride_tricks_impl import ( sliding_window_view as sliding_window_view, ) diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py index 65137324d1a9..2dd19410bbf0 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -89,246 +89,222 @@ def invalid_httpfile(): class TestDataSourceOpen: - def setup_method(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - fh = self.ds.open(valid_httpurl()) + def test_ValidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + fh = ds.open(valid_httpurl()) assert_(fh) fh.close() - def test_InvalidHTTP(self): + def test_InvalidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) url = invalid_httpurl() - assert_raises(OSError, self.ds.open, url) + assert_raises(OSError, ds.open, url) try: - self.ds.open(url) + ds.open(url) except OSError as e: # Regression test for bug fixed in r4342. assert_(e.errno is None) - def test_InvalidHTTPCacheURLError(self): - assert_raises(URLError, self.ds._cache, invalid_httpurl()) + def test_InvalidHTTPCacheURLError(self, tmp_path): + ds = datasource.DataSource(tmp_path) + assert_raises(URLError, ds._cache, invalid_httpurl()) - def test_ValidFile(self): - local_file = valid_textfile(self.tmpdir) - fh = self.ds.open(local_file) + def test_ValidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + local_file = valid_textfile(tmp_path) + fh = ds.open(local_file) assert_(fh) fh.close() - def test_InvalidFile(self): - invalid_file = invalid_textfile(self.tmpdir) - assert_raises(OSError, self.ds.open, invalid_file) + def test_InvalidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + invalid_file = invalid_textfile(tmp_path) + assert_raises(OSError, ds.open, invalid_file) - def test_ValidGzipFile(self): + def test_ValidGzipFile(self, tmp_path): try: import gzip except ImportError: # We don't have the gzip capabilities to test. pytest.skip() # Test datasource's internal file_opener for Gzip files. 
- filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') + ds = datasource.DataSource(tmp_path) + filepath = os.path.join(tmp_path, 'foobar.txt.gz') fp = gzip.open(filepath, 'w') fp.write(magic_line) fp.close() - fp = self.ds.open(filepath) + fp = ds.open(filepath) result = fp.readline() fp.close() assert_equal(magic_line, result) - def test_ValidBz2File(self): + def test_ValidBz2File(self, tmp_path): try: import bz2 except ImportError: # We don't have the bz2 capabilities to test. pytest.skip() # Test datasource's internal file_opener for BZip2 files. - filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + ds = datasource.DataSource(tmp_path) + filepath = os.path.join(tmp_path, 'foobar.txt.bz2') fp = bz2.BZ2File(filepath, 'w') fp.write(magic_line) fp.close() - fp = self.ds.open(filepath) + fp = ds.open(filepath) result = fp.readline() fp.close() assert_equal(magic_line, result) class TestDataSourceExists: - def setup_method(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.ds + def test_ValidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + assert_(ds.exists(valid_httpurl())) - def test_ValidHTTP(self): - assert_(self.ds.exists(valid_httpurl())) + def test_InvalidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + assert_equal(ds.exists(invalid_httpurl()), False) - def test_InvalidHTTP(self): - assert_equal(self.ds.exists(invalid_httpurl()), False) - - def test_ValidFile(self): + def test_ValidFile(self, tmp_path): # Test valid file in destpath - tmpfile = valid_textfile(self.tmpdir) - assert_(self.ds.exists(tmpfile)) + ds = datasource.DataSource(tmp_path) + tmpfile = valid_textfile(tmp_path) + assert_(ds.exists(tmpfile)) # Test valid local file not in destpath localdir = mkdtemp() tmpfile = valid_textfile(localdir) - assert_(self.ds.exists(tmpfile)) + assert_(ds.exists(tmpfile)) rmtree(localdir) - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - assert_equal(self.ds.exists(tmpfile), False) + def test_InvalidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + tmpfile = invalid_textfile(tmp_path) + assert_equal(ds.exists(tmpfile), False) class TestDataSourceAbspath: - def setup_method(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.ds = datasource.DataSource(self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = os.path.join(self.tmpdir, netloc, + def test_ValidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + _, netloc, upath, _, _, _ = urlparse(valid_httpurl()) + local_path = os.path.join(tmp_path, netloc, upath.strip(os.sep).strip('/')) - assert_equal(local_path, self.ds.abspath(valid_httpurl())) + assert_equal(local_path, ds.abspath(valid_httpurl())) - def test_ValidFile(self): - tmpfile = valid_textfile(self.tmpdir) + def test_ValidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + tmpfile = valid_textfile(tmp_path) tmpfilename = os.path.split(tmpfile)[-1] # Test with filename only - assert_equal(tmpfile, self.ds.abspath(tmpfilename)) + assert_equal(tmpfile, ds.abspath(tmpfilename)) # Test filename with complete path - assert_equal(tmpfile, self.ds.abspath(tmpfile)) + assert_equal(tmpfile, ds.abspath(tmpfile)) - def test_InvalidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) - invalidhttp = os.path.join(self.tmpdir, netloc, + def 
test_InvalidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + _, netloc, upath, _, _, _ = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(tmp_path, netloc, upath.strip(os.sep).strip('/')) - assert_(invalidhttp != self.ds.abspath(valid_httpurl())) + assert_(invalidhttp != ds.abspath(valid_httpurl())) - def test_InvalidFile(self): - invalidfile = valid_textfile(self.tmpdir) - tmpfile = valid_textfile(self.tmpdir) + def test_InvalidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + invalidfile = valid_textfile(tmp_path) + tmpfile = valid_textfile(tmp_path) tmpfilename = os.path.split(tmpfile)[-1] # Test with filename only - assert_(invalidfile != self.ds.abspath(tmpfilename)) + assert_(invalidfile != ds.abspath(tmpfilename)) # Test filename with complete path - assert_(invalidfile != self.ds.abspath(tmpfile)) + assert_(invalidfile != ds.abspath(tmpfile)) - def test_sandboxing(self): - tmpfile = valid_textfile(self.tmpdir) + def test_sandboxing(self, tmp_path): + ds = datasource.DataSource(tmp_path) + tmpfile = valid_textfile(tmp_path) tmpfilename = os.path.split(tmpfile)[-1] - tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) + path = lambda x: os.path.abspath(ds.abspath(x)) - assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(tmpfile).startswith(self.tmpdir)) - assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) + assert_(path(valid_httpurl()).startswith(str(tmp_path))) + assert_(path(invalid_httpurl()).startswith(str(tmp_path))) + assert_(path(tmpfile).startswith(str(tmp_path))) + assert_(path(tmpfilename).startswith(str(tmp_path))) for fn in malicious_files: - assert_(tmp_path(http_path + fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) + assert_(path(http_path + fn).startswith(str(tmp_path))) + assert_(path(fn).startswith(str(tmp_path))) - def test_windows_os_sep(self): + def test_windows_os_sep(self, tmp_path): orig_os_sep = os.sep try: os.sep = '\\' - self.test_ValidHTTP() - self.test_ValidFile() - self.test_InvalidHTTP() - self.test_InvalidFile() - self.test_sandboxing() + self.test_ValidHTTP(tmp_path) + self.test_ValidFile(tmp_path) + self.test_InvalidHTTP(tmp_path) + self.test_InvalidFile(tmp_path) + self.test_sandboxing(tmp_path) finally: os.sep = orig_os_sep class TestRepositoryAbspath: - def setup_method(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = os.path.join(self.repos._destpath, netloc, + def test_ValidHTTP(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + _, netloc, upath, _, _, _ = urlparse(valid_httpurl()) + local_path = os.path.join(repos._destpath, netloc, upath.strip(os.sep).strip('/')) - filepath = self.repos.abspath(valid_httpfile()) + filepath = repos.abspath(valid_httpfile()) assert_equal(local_path, filepath) - def test_sandboxing(self): - tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) - assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) + def test_sandboxing(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + path = lambda x: os.path.abspath(repos.abspath(x)) + assert_(path(valid_httpfile()).startswith(str(tmp_path))) for fn in malicious_files: - assert_(tmp_path(http_path + 
fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) + assert_(path(http_path + fn).startswith(str(tmp_path))) + assert_(path(fn).startswith(str(tmp_path))) - def test_windows_os_sep(self): + def test_windows_os_sep(self, tmp_path): orig_os_sep = os.sep try: os.sep = '\\' - self.test_ValidHTTP() - self.test_sandboxing() + self.test_ValidHTTP(tmp_path) + self.test_sandboxing(tmp_path) finally: os.sep = orig_os_sep class TestRepositoryExists: - def setup_method(self): - self.tmpdir = mkdtemp() - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidFile(self): + def test_ValidFile(self, tmp_path): # Create local temp file - tmpfile = valid_textfile(self.tmpdir) - assert_(self.repos.exists(tmpfile)) + repos = datasource.Repository(valid_baseurl(), tmp_path) + tmpfile = valid_textfile(tmp_path) + assert_(repos.exists(tmpfile)) - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - assert_equal(self.repos.exists(tmpfile), False) + def test_InvalidFile(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + tmpfile = invalid_textfile(tmp_path) + assert_equal(repos.exists(tmpfile), False) - def test_RemoveHTTPFile(self): - assert_(self.repos.exists(valid_httpurl())) + def test_RemoveHTTPFile(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + assert_(repos.exists(valid_httpurl())) - def test_CachedHTTPFile(self): + def test_CachedHTTPFile(self, tmp_path): localfile = valid_httpurl() # Create a locally cached temp file with an URL based # directory structure. This is similar to what Repository.open # would do. - scheme, netloc, upath, pms, qry, frg = urlparse(localfile) - local_path = os.path.join(self.repos._destpath, netloc) + repos = datasource.Repository(valid_baseurl(), tmp_path) + _, netloc, _, _, _, _ = urlparse(localfile) + local_path = os.path.join(repos._destpath, netloc) os.mkdir(local_path, 0o0700) tmpfile = valid_textfile(local_path) - assert_(self.repos.exists(tmpfile)) + assert_(repos.exists(tmpfile)) class TestOpenFunc: - def setup_method(self): - self.tmpdir = mkdtemp() - - def teardown_method(self): - rmtree(self.tmpdir) - - def test_DataSourceOpen(self): - local_file = valid_textfile(self.tmpdir) + def test_DataSourceOpen(self, tmp_path): + local_file = valid_textfile(tmp_path) # Test case where destpath is passed in - fp = datasource.open(local_file, destpath=self.tmpdir) + fp = datasource.open(local_file, destpath=tmp_path) assert_(fp) fp.close() # Test case where default destpath is used diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py index 1581ffbe95fd..2555c4b86f6c 100644 --- a/numpy/lib/tests/test__iotools.py +++ b/numpy/lib/tests/test__iotools.py @@ -1,6 +1,8 @@ import time from datetime import date +import pytest + import numpy as np from numpy.lib._iotools import ( LineSplitter, @@ -10,12 +12,7 @@ flatten_dtype, has_nested_fields, ) -from numpy.testing import ( - assert_, - assert_allclose, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_allclose, assert_equal, assert_raises class TestLineSplitter: @@ -205,6 +202,7 @@ def test_missing(self): except ValueError: pass + @pytest.mark.thread_unsafe(reason="monkeypatches StringConverter") def test_upgrademapper(self): "Tests updatemapper" dateparser = _bytes_to_date diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 
6efbe348ca81..14383e743e47 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -1413,3 +1413,15 @@ def test_dtype_persistence(dtype, mode): arr = np.zeros((3, 2, 1), dtype=dtype) result = np.pad(arr, 1, mode=mode) assert result.dtype == dtype + + +@pytest.mark.parametrize("input_shape, pad_width, expected_shape", [ + ((3, 4, 5), {-2: (1, 3)}, (3, 4 + 1 + 3, 5)), + ((3, 4, 5), {0: (5, 2)}, (3 + 5 + 2, 4, 5)), + ((3, 4, 5), {0: (5, 2), -1: (3, 4)}, (3 + 5 + 2, 4, 5 + 3 + 4)), + ((3, 4, 5), {1: 5}, (3, 4 + 2 * 5, 5)), +]) +def test_pad_dict_pad_width(input_shape, pad_width, expected_shape): + a = np.zeros(input_shape) + result = np.pad(a, pad_width) + assert result.shape == expected_shape diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 7865e1b16ee9..4e8d503427de 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -5,6 +5,7 @@ import numpy as np from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique +from numpy.dtypes import StringDType from numpy.exceptions import AxisError from numpy.testing import ( assert_array_equal, @@ -725,7 +726,10 @@ def test_unique_1d(self): # test for ticket #2799 aa = [1. + 0.j, 1 - 1.j, 1] - assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + assert_array_equal( + np.sort(np.unique(aa)), + [1. - 1.j, 1.], + ) # test for ticket #4785 a = [(1, 2), (1, 2), (2, 3)] @@ -760,7 +764,8 @@ def test_unique_1d(self): ua_idx = [2, 0, 1] ua_inv = [1, 2, 0, 2] ua_cnt = [1, 1, 2] - assert_equal(np.unique(a), ua) + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) @@ -771,7 +776,8 @@ def test_unique_1d(self): ua_idx = [2, 0, 3] ua_inv = [1, 2, 0, 2, 2] ua_cnt = [1, 1, 3] - assert_equal(np.unique(a), ua) + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) @@ -813,7 +819,9 @@ def test_unique_1d(self): def test_unique_zero_sized(self): # test for zero-sized arrays - for dt in self.get_types(): + types = self.get_types() + types.extend('SU') + for dt in types: a = np.array([], dt) b = np.array([], dt) i1 = np.array([], np.int64) @@ -838,6 +846,187 @@ class Subclass(np.ndarray): bb = Subclass(b.shape, dtype=dt, buffer=b) self.check_all(aa, bb, i1, i2, c, dt) + def test_unique_byte_string_hash_based(self): + # test for byte string arrays + arr = ['apple', 'banana', 'apple', 'cherry', 'date', 'banana', 'fig', 'grape'] + unq_sorted = ['apple', 'banana', 'cherry', 'date', 'fig', 'grape'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_unicode_string_hash_based(self): + # test for unicode string arrays + arr = [ + 'café', 'cafe', 'café', 'naïve', 'naive', + 'résumé', 'naïve', 'resume', 'résumé', + ] + unq_sorted = ['cafe', 'café', 'naive', 'naïve', 'resume', 'résumé'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting +
assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_vstring_hash_based_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=True) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niño', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'élève', + 'é' * 30, + 'é' * 400, + 'ñ' * 20, + 'ñ' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=True) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 should have exactly one None + count_none = sum(x is None for x in a1) + assert_equal(count_none, 1) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_hash_based_not_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=False) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niño', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'élève', + 'é' * 30, + 'é' * 400, + 'ñ' * 20, + 'ñ' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # equal_nan=False keeps every None distinct, so a1 should have six Nones + count_none = sum(x is None for x in a1) + assert_equal(count_none, 6) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_errors(self): + a = np.array( + [ + 'apple', 'banana', 'apple', None, 'cherry', + 'date', 'banana', 'fig', None, 'grape', + ] * 2, + dtype=StringDType(na_object=None) + ) + assert_raises(ValueError, unique, a, equal_nan=False) + @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"]) def test_unsupported_hash_based(self, arg): """These currently never use the hash-based solution.
However, @@ -1015,7 +1204,13 @@ def test_unique_nanequals(self): assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) def test_unique_array_api_functions(self): - arr = np.array([np.nan, 1, 4, 1, 3, 4, np.nan, 5, 1]) + arr = np.array( + [ + np.nan, 1.0, 0.0, 4.0, -np.nan, + -0.0, 1.0, 3.0, 4.0, np.nan, + 5.0, -0.0, 1.0, -np.nan, 0.0, + ], + ) for res_unique_array_api, res_unique in [ ( @@ -1042,8 +1237,14 @@ def test_unique_array_api_functions(self): ) ]: assert len(res_unique_array_api) == len(res_unique) + if not isinstance(res_unique_array_api, tuple): + res_unique_array_api = (res_unique_array_api,) + if not isinstance(res_unique, tuple): + res_unique = (res_unique,) + for actual, expected in zip(res_unique_array_api, res_unique): - assert_array_equal(actual, expected) + # Order of output is not guaranteed + assert_equal(np.sort(actual), np.sort(expected)) def test_unique_inverse_shape(self): # Regression test for https://github.com/numpy/numpy/issues/25552 @@ -1072,3 +1273,30 @@ def test_unique_with_matrix(self, data, transpose, dtype): u = np.unique(mat) expected = np.unique(np.asarray(mat)) assert_array_equal(u, expected, strict=True) + + def test_unique_axis0_equal_nan_on_1d_array(self): + # Test Issue #29336 + arr1d = np.array([np.nan, 0, 0, np.nan]) + expected = np.array([0., np.nan]) + result = np.unique(arr1d, axis=0, equal_nan=True) + assert_array_equal(result, expected) + + def test_unique_axis_minus1_eq_on_1d_array(self): + arr1d = np.array([np.nan, 0, 0, np.nan]) + expected = np.array([0., np.nan]) + result = np.unique(arr1d, axis=-1, equal_nan=True) + assert_array_equal(result, expected) + + def test_unique_axis_float_raises_typeerror(self): + arr1d = np.array([np.nan, 0, 0, np.nan]) + with pytest.raises(TypeError, match="integer argument expected"): + np.unique(arr1d, axis=0.0, equal_nan=False) + + @pytest.mark.parametrize('dt', [np.dtype('F'), np.dtype('D')]) + @pytest.mark.parametrize('values', [[complex(0.0, -1), complex(-0.0, -1), 0], + [-200, complex(-200, -0.0), -1], + [-25, 3, -5j, complex(-25, -0.0), 3j]]) + def test_unique_complex_signed_zeros(self, dt, values): + z = np.array(values, dtype=dt) + u = np.unique(z) + assert len(u) == len(values) - 1 diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py index 800c9a2a5f77..42a85e58ff62 100644 --- a/numpy/lib/tests/test_arrayterator.py +++ b/numpy/lib/tests/test_arrayterator.py @@ -14,8 +14,7 @@ def test(): ndims = randint(5) + 1 shape = tuple(randint(10) + 1 for dim in range(ndims)) els = reduce(mul, shape) - a = np.arange(els) - a.shape = shape + a = np.arange(els).reshape(shape) buf_size = randint(2 * els) b = Arrayterator(a, buf_size) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index d805d3493ca4..52994f13bd05 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -291,7 +291,6 @@ assert_array_equal, assert_raises, assert_raises_regex, - assert_warns, ) from numpy.testing._private.utils import requires_memory @@ -384,9 +383,6 @@ ('z', 'u1')] NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), @@ -566,6 +562,8 @@ def test_python2_python3_interoperability(): assert_array_equal(data, np.ones(2)) +@pytest.mark.filterwarnings( + "ignore:.*align should be 
passed:numpy.exceptions.VisibleDeprecationWarning") def test_pickle_python2_python3(): # Test that loading object arrays saved on Python 2 works both on # Python 2 and Python 3 and vice versa @@ -629,17 +627,18 @@ def test_pickle_disallow(tmpdir): allow_pickle=False) @pytest.mark.parametrize('dt', [ - np.dtype(np.dtype([('a', np.int8), - ('b', np.int16), - ('c', np.int32), - ], align=True), - (3,)), - np.dtype([('x', np.dtype({'names': ['a', 'b'], + # Not testing a subarray only dtype, because it cannot be attached to an array + # (and would fail the test as of writing this.) + np.dtype([('a', np.int8), + ('b', np.int16), + ('c', np.int32), + ], align=True), + np.dtype([('x', np.dtype(({'names': ['a', 'b'], 'formats': ['i1', 'i1'], 'offsets': [0, 4], 'itemsize': 8, }, - (3,)), + (3,))), (4,), )]), np.dtype([('x', @@ -958,6 +957,7 @@ def test_large_file_support(tmpdir): @pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") @pytest.mark.slow @requires_memory(free_bytes=2 * 2**30) +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_archive(tmpdir): # Regression test for product of saving arrays with dimensions of array # having a product that doesn't fit in int32. See gh-7598 for details. @@ -1008,7 +1008,7 @@ def test_unicode_field_names(tmpdir): # notifies the user that 3.0 is selected with open(fname, 'wb') as f: - with assert_warns(UserWarning): + with pytest.warns(UserWarning): format.write_array(f, arr, version=None) def test_header_growth_axis(): @@ -1035,13 +1035,11 @@ def test_header_growth_axis(): float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) ]}), ]) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() - with assert_warns(UserWarning): + with pytest.warns(UserWarning): np.save(buf, arr) buf.seek(0) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 50c61e6e04fa..412f06d07e20 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -61,10 +61,9 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) +np_floats = [np.half, np.single, np.double, np.longdouble] def get_mat(n): data = np.arange(n) @@ -309,7 +308,7 @@ def test_basic(self): def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: - # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 + # github.com/scikit-learn/scikit-learn/commit/7842748 a = np.array([[1, 2], [3, 4]]) assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) @@ -566,10 +565,6 @@ def test_return_dtype(self): m = np.isnan(d) assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0]) - def test_deprecated_empty(self): - assert_raises(ValueError, select, [], [], 3j) - assert_raises(ValueError, select, [], []) - def test_non_bool_deprecation(self): choices = self.choices conditions = self.conditions[:] @@ -975,18 +970,20 @@ def test_append(self): class TestDelete: - def setup_method(self): - self.a = np.arange(5) - self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) + def _create_arrays(self): + a = np.arange(5) + nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) + return a, nd_a def _check_inverse_of_slicing(self, indices): - a_del = delete(self.a, indices) - nd_a_del = delete(self.nd_a, indices, axis=1) + a, nd_a = 
self._create_arrays() + a_del = delete(a, indices) + nd_a_del = delete(nd_a, indices, axis=1) msg = f'Delete failed for obj: {indices!r}' - assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a, + assert_array_equal(setxor1d(a_del, a[indices, ]), a, err_msg=msg) - xor = setxor1d(nd_a_del[0, :, 0], self.nd_a[0, indices, 0]) - assert_array_equal(xor, self.nd_a[0, :, 0], err_msg=msg) + xor = setxor1d(nd_a_del[0, :, 0], nd_a[0, indices, 0]) + assert_array_equal(xor, nd_a[0, :, 0], err_msg=msg) def test_slices(self): lims = [-6, -2, 0, 1, 2, 4, 5] @@ -998,11 +995,12 @@ def test_slices(self): self._check_inverse_of_slicing(s) def test_fancy(self): + a, _ = self._create_arrays() self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) with pytest.raises(IndexError): - delete(self.a, [100]) + delete(a, [100]) with pytest.raises(IndexError): - delete(self.a, [-100]) + delete(a, [-100]) self._check_inverse_of_slicing([0, -1, 2, 2]) @@ -1010,13 +1008,13 @@ def test_fancy(self): # not legal, indexing with these would change the dimension with pytest.raises(ValueError): - delete(self.a, True) + delete(a, True) with pytest.raises(ValueError): - delete(self.a, False) + delete(a, False) # not enough items with pytest.raises(ValueError): - delete(self.a, [False] * 4) + delete(a, [False] * 4) def test_single(self): self._check_inverse_of_slicing(0) @@ -1032,7 +1030,9 @@ def test_0d(self): def test_subclass(self): class SubClass(np.ndarray): pass - a = self.a.view(SubClass) + + a_orig, _ = self._create_arrays() + a = a_orig.view(SubClass) assert_(isinstance(delete(a, 0), SubClass)) assert_(isinstance(delete(a, []), SubClass)) assert_(isinstance(delete(a, [0, 1]), SubClass)) @@ -1057,12 +1057,13 @@ def test_index_floats(self): @pytest.mark.parametrize("indexer", [np.array([1]), [1]]) def test_single_item_array(self, indexer): - a_del_int = delete(self.a, 1) - a_del = delete(self.a, indexer) + a, nd_a = self._create_arrays() + a_del_int = delete(a, 1) + a_del = delete(a, indexer) assert_equal(a_del_int, a_del) - nd_a_del_int = delete(self.nd_a, 1, axis=1) - nd_a_del = delete(self.nd_a, np.array([1]), axis=1) + nd_a_del_int = delete(nd_a, 1, axis=1) + nd_a_del = delete(nd_a, np.array([1]), axis=1) assert_equal(nd_a_del_int, nd_a_del) def test_single_item_array_non_int(self): @@ -1181,8 +1182,8 @@ def test_second_order_accurate(self): assert_(np.all(num_error < 0.03) == True) # test with unevenly spaced - np.random.seed(0) - x = np.sort(np.random.random(10)) + rng = np.random.default_rng(0) + x = np.sort(rng.random(10)) y = 2 * x ** 3 + 4 * x ** 2 + 2 * x analytical = 6 * x ** 2 + 8 * x + 2 num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1) @@ -1379,6 +1380,36 @@ class TestTrimZeros: c = a.astype(complex) d = a.astype(object) + def construct_input_output(self, rng, shape, axis, trim): + """Construct an input/output test pair for trim_zeros""" + # Standardize axis to a tuple. + if axis is None: + axis = tuple(range(len(shape))) + elif isinstance(axis, int): + axis = (len(shape) + axis if axis < 0 else axis,) + else: + axis = tuple(len(shape) + ax if ax < 0 else ax for ax in axis) + + # Populate a random interior slice with nonzero entries. 
+ data = np.zeros(shape) + i_start = rng.integers(low=0, high=np.array(shape) - 1) + i_end = rng.integers(low=i_start + 1, high=shape) + inner_shape = tuple(i_end - i_start) + inner_data = 1 + rng.random(inner_shape) + data[tuple(slice(i, j) for i, j in zip(i_start, i_end))] = inner_data + + # Construct the expected output of N-dimensional trim_zeros + # with the given axis and trim arguments. + if 'f' not in trim: + i_start = np.array([None for _ in shape]) + if 'b' not in trim: + i_end = np.array([None for _ in shape]) + idx = tuple(slice(i, j) if ax in axis else slice(None) + for ax, (i, j) in enumerate(zip(i_start, i_end))) + expected = data[idx] + + return data, expected + def values(self): attr_names = ('a', 'b', 'c', 'd') return (getattr(self, name) for name in attr_names) @@ -1464,6 +1495,29 @@ def test_unexpected_trim_value(self, trim): with pytest.raises(ValueError, match=r"unexpected character\(s\) in `trim`"): trim_zeros(arr, trim=trim) + @pytest.mark.parametrize("shape, axis", [ + [(5,), None], + [(5,), ()], + [(5,), 0], + [(5, 6), None], + [(5, 6), ()], + [(5, 6), 0], + [(5, 6), (-1,)], + [(5, 6, 7), None], + [(5, 6, 7), ()], + [(5, 6, 7), 1], + [(5, 6, 7), (0, 2)], + [(5, 6, 7, 8), None], + [(5, 6, 7, 8), ()], + [(5, 6, 7, 8), -2], + [(5, 6, 7, 8), (0, 1, 3)], + ]) + @pytest.mark.parametrize("trim", ['fb', 'f', 'b']) + def test_multiple_axes(self, shape, axis, trim): + rng = np.random.default_rng(4321) + data, expected = self.construct_input_output(rng, shape, axis, trim) + assert_array_equal(trim_zeros(data, axis=axis, trim=trim), expected) + class TestExtins: @@ -1732,6 +1786,15 @@ def test_string_ticket_1892(self): s = '0123456789' * 10 assert_equal(s, f(s)) + def test_dtype_promotion_gh_29189(self): + # dtype should not be silently promoted (int32 -> int64) + dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] + + for dtype in dtypes: + x = np.asarray([1, 2, 3], dtype=dtype) + y = np.vectorize(lambda x: x + x)(x) + assert x.dtype == y.dtype + def test_cache(self): # Ensure that vectorized func called exactly once per argument. _calls = [0] @@ -2042,6 +2105,9 @@ def unbound(*args): ('bound', A.iters), ('unbound', 0), ]) + @pytest.mark.thread_unsafe( + reason="test result depends on the reference count of a global object" + ) def test_frompyfunc_leaks(self, name, incr): # exposed in gh-11867 as np.vectorized, but the problem stems from # frompyfunc. 
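# An illustrative sketch (not from the patch) of the N-dimensional trim_zeros
# behaviour the multi-axis tests above assert, assuming a NumPy version where
# `np.trim_zeros` accepts the `axis` keyword exercised there:
import numpy as np

a = np.zeros((4, 5))
a[1:3, 2:4] = 1.0  # nonzero interior block
# Trim leading/trailing all-zero slices along axis 1 only.
out = np.trim_zeros(a, trim='fb', axis=1)
assert out.shape == (4, 2)  # columns 0, 1 and 4 removed; rows untouched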
@@ -2398,6 +2464,7 @@ def test_float16_underflow(self): # resulting in nan assert_array_equal(sinc(x), np.asarray(1.0)) + class TestUnique: def test_simple(self): @@ -2461,28 +2528,6 @@ def test_simple(self): assert_almost_equal(tgt2, self.res2) assert_(np.all(np.abs(tgt2) <= 1.0)) - def test_ddof(self): - # ddof raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) - sup.filter(DeprecationWarning) - # ddof has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) - assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) - - def test_bias(self): - # bias raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) - assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) - sup.filter(DeprecationWarning) - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, bias=1), self.res1) - def test_complex(self): x = np.array([[1, 2, 3], [1j, 2j, 3j]]) res = corrcoef(x) @@ -2511,7 +2556,7 @@ def test_extreme(self): assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) assert_(np.all(np.abs(c) <= 1.0)) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_corrcoef_dtype(self, test_type): cast_A = self.A.astype(test_type) res = corrcoef(cast_A, dtype=test_type) @@ -2617,7 +2662,7 @@ def test_unit_fweights_and_aweights(self): aweights=self.unit_weights), self.res1) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_cov_dtype(self, test_type): cast_x1 = self.x1.astype(test_type) res = cov(cast_x1, dtype=test_type) @@ -2639,7 +2684,8 @@ def test_simple(self): # need at least one test above 8, as the implementation is piecewise A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) - expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, + 1.00006049, 2815.71662847]) assert_almost_equal(i0(A), expected) assert_almost_equal(i0(-A), expected) @@ -3132,23 +3178,27 @@ def test_non_finite_any_nan(self, sc): def test_non_finite_inf(self, sc): """ Test that interp between opposite infs gives nan """ - assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) # unless the y values are equal assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) def test_non_finite_half_inf_xf(self, sc): """ Test that interp where both axes have a bound at inf gives nan """ - assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) - 
assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) def test_non_finite_half_inf_x(self, sc): """ Test interp where the x axis has a bound at inf """ @@ -3227,6 +3277,16 @@ def test_period(self): assert_almost_equal(np.interp(x, xp, fp, period=360), y) +quantile_methods = [ + 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', + 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', + 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', + 'midpoint'] + + +methods_supporting_weights = ["inverted_cdf"] + + class TestPercentile: def test_basic(self): @@ -3820,15 +3880,38 @@ def test_nat_basic(self, dtype, pos): res = np.percentile(a, 30, axis=0) assert_array_equal(np.isnat(res), [False, True, False]) - -quantile_methods = [ - 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', - 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', - 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', - 'midpoint'] - - -methods_supporting_weights = ["inverted_cdf"] + @pytest.mark.parametrize("qtype", [np.float16, np.float32]) + @pytest.mark.parametrize("method", quantile_methods) + def test_percentile_gh_29003(self, qtype, method): + # test that with float16 or float32 input we do not get overflow + zero = qtype(0) + one = qtype(1) + a = np.zeros(65521, qtype) + a[:20_000] = one + z = np.percentile(a, 50, method=method) + assert z == zero + assert z.dtype == a.dtype + z = np.percentile(a, 99, method=method) + assert z == one + assert z.dtype == a.dtype + + def test_percentile_gh_29003_Fraction(self): + zero = Fraction(0) + one = Fraction(1) + a = np.array([zero] * 65521) + a[:20_000] = one + z = np.percentile(a, 50) + assert z == zero + z = np.percentile(a, Fraction(50)) + assert z == zero + assert np.array(z).dtype == a.dtype + + z = np.percentile(a, 99) + assert z == one + # test that with only Fraction input the return type is a Fraction + z = np.percentile(a, Fraction(99)) + assert z == one + assert np.array(z).dtype == a.dtype class TestQuantile: @@ -3875,7 +3958,7 @@ def test_fraction(self): q = np.quantile(x, .5) assert_equal(q, 1.75) - assert_equal(type(q), np.float64) + assert isinstance(q, float) q = np.quantile(x, Fraction(1, 2)) assert_equal(q, Fraction(7, 4)) @@ -4145,6 +4228,17 @@ def test_quantile_with_weights_and_axis(self, method): ) assert_allclose(q, q_res) + # axis is a tuple of all axes + q = np.quantile(y, alpha, weights=w, method=method, axis=(0, 1, 2)) + q_res = np.quantile(y, alpha, weights=w, method=method, axis=None) + assert_allclose(q, q_res) + + q = np.quantile(y, alpha, weights=w, method=method, axis=(1, 2)) + q_res = 
np.zeros(shape=(2,)) + for i in range(2): + q_res[i] = np.quantile(y[i], alpha, weights=w[i], method=method) + assert_allclose(q, q_res) + @pytest.mark.parametrize("method", methods_supporting_weights) def test_quantile_weights_min_max(self, method): # Test weighted quantile at 0 and 1 with leading and trailing zero @@ -4194,6 +4288,53 @@ def test_closest_observation(self): assert_equal(4, np.quantile(arr[0:9], q, method=m)) assert_equal(5, np.quantile(arr, q, method=m)) + @pytest.mark.parametrize("weights", + [[1, np.inf, 1, 1], [1, np.inf, 1, np.inf], [0, 0, 0, 0], + [np.finfo("float64").max] * 4]) + @pytest.mark.parametrize("dty", ["f8", "O"]) + def test_inf_zeroes_err(self, weights, dty): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. + wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(ValueError, + match=r"Weights included NaN, inf or were all zero"): + # We (currently) don't bother to check ahead so 0/0 or + # overflow to `inf` while summing weights, or `inf / inf` + # will all warn before the error is raised. + with np.errstate(all="ignore"): + a = np.quantile(arr, q, weights=wgts, method=m, axis=1) + + @pytest.mark.parametrize("weights", + [[1, np.nan, 1, 1], [1, np.nan, np.nan, 1]]) + @pytest.mark.parametrize(["err", "dty"], + [(ValueError, "f8"), ((RuntimeWarning, ValueError), "O")]) + def test_nan_err(self, err, dty, weights): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. + wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(err): + a = np.quantile(arr, q, weights=wgts, method=m) + + def test_quantile_gh_29003_Fraction(self): + r = np.quantile([1, 2], q=Fraction(1)) + assert r == Fraction(2) + assert isinstance(r, Fraction) + + r = np.quantile([1, 2], q=Fraction(.5)) + assert r == Fraction(3, 2) + assert isinstance(r, Fraction) + + def test_float16_gh_29003(self): + a = np.arange(50_001, dtype=np.float16) + q = .999 + value = np.quantile(a, q) + assert value == q * 50_000 + assert value.dtype == np.float16 + class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index b7752d1a8f1e..cae11cfdcd65 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -1,3 +1,5 @@ +import warnings + import pytest import numpy as np @@ -12,7 +14,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) @@ -135,14 +136,12 @@ def test_bool_conversion(self): a = np.array([1, 1, 0], dtype=np.uint8) int_hist, int_edges = np.histogram(a) - # Should raise an warning on booleans + # Should raise a warning on booleans # Ensure that the histograms are equivalent, need to suppress # the warnings to get the actual outputs - with suppress_warnings() as sup: - rec = sup.record(RuntimeWarning, 'Converting input from .*') + with pytest.warns(RuntimeWarning, match='Converting input from .*'): hist, edges = np.histogram([True, True, False]) # A warning should be issued - assert_equal(len(rec), 1) assert_array_equal(hist, int_hist) assert_array_equal(edges, int_edges) @@ -284,9 +283,8 @@ def test_some_nan_values(self): all_nan = np.array([np.nan, np.nan]) # the internal comparisons with NaN give warnings - sup = suppress_warnings() - sup.filter(RuntimeWarning) - with sup: + with warnings.catch_warnings(): + warnings.simplefilter('ignore', 
RuntimeWarning)
             # can't infer range with nan
             assert_raises(ValueError, histogram, one_nan, bins='auto')
             assert_raises(ValueError, histogram, all_nan, bins='auto')
@@ -554,7 +552,8 @@ def test_outlier(self):
         assert_equal(len(a), numbins)
 
     def test_scott_vs_stone(self):
-        """Verify that Scott's rule and Stone's rule converges for normally distributed data"""
+        # Verify that Scott's rule and Stone's rule converge for normally
+        # distributed data
 
         def nbins_ratio(seed, size):
             rng = np.random.RandomState(seed)
@@ -562,10 +561,11 @@ def nbins_ratio(seed, size):
             a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
             return a / (a + b)
 
-        ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
-              for seed in range(10)]
+        geom_space = np.geomspace(start=10, stop=100, num=4).round().astype(int)
+        ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)]
 
-        # the average difference between the two methods decreases as the dataset size increases.
+        # the average difference between the two methods decreases as the dataset
+        # size increases.
         avg = abs(np.mean(ll, axis=0) - 0.5)
         assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)
 
@@ -616,9 +616,9 @@ def test_integer(self, bins):
         """
         Test that bin width for integer data is at least 1.
         """
-        with suppress_warnings() as sup:
+        with warnings.catch_warnings():
             if bins == 'stone':
-                sup.filter(RuntimeWarning)
+                warnings.simplefilter('ignore', RuntimeWarning)
             assert_equal(
                 np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins),
                 np.arange(9))
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index ed8709db5238..81e47ec3dff2 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -199,6 +199,11 @@ def test_empty_array_unravel(self):
         with assert_raises(ValueError):
             np.unravel_index([1], (2, 1, 0))
 
+    def test_regression_size_1_index(self):
+        # actually tests the nditer size one index tracking
+        # regression test for gh-29690
+        np.unravel_index(np.array([[1, 0, 1, 0]], dtype=np.uint32), (4,))
+
 class TestGrid:
     def test_basic(self):
         a = mgrid[-1:1:10j]
@@ -566,3 +571,123 @@ def test_ndindex():
     # Make sure 0-sized ndindex works correctly
     x = list(ndindex(*[0]))
     assert_equal(x, [])
+
+
+def test_ndindex_zero_dimensions_explicit():
+    """Test ndindex produces empty iterators for explicit
+    zero-length dimensions."""
+    assert list(np.ndindex(0, 3)) == []
+    assert list(np.ndindex(3, 0, 2)) == []
+    assert list(np.ndindex(0)) == []
+
+
+@pytest.mark.parametrize("bad_shape", [2.5, "2", [2, 3], (2.0, 3)])
+def test_ndindex_non_integer_dimensions(bad_shape):
+    """Test that non-integer dimensions raise TypeError."""
+    with pytest.raises(TypeError):
+        # Passing bad_shape directly to ndindex. It will try to use it
+        # as a dimension and should trigger a TypeError.
+        list(np.ndindex(bad_shape))
+
+
+def test_ndindex_stop_iteration_behavior():
+    """Test that StopIteration is raised properly after exhaustion."""
+    it = np.ndindex(2, 2)
+    # Exhaust the iterator
+    list(it)
+    # Should raise StopIteration on subsequent calls
+    with pytest.raises(StopIteration):
+        next(it)
+
+
+def test_ndindex_iterator_independence():
+    """Test that each ndindex instance creates independent iterators."""
+    shape = (2, 3)
+    iter1 = np.ndindex(*shape)
+    iter2 = np.ndindex(*shape)
+
+    next(iter1)
+    next(iter1)
+
+    assert_equal(next(iter2), (0, 0))
+    assert_equal(next(iter1), (0, 2))
+
+
+def test_ndindex_tuple_vs_args_consistency():
+    """Test that ndindex(shape) and ndindex(*shape) produce same results."""
+    # Single dimension
+    assert_equal(list(np.ndindex(5)), list(np.ndindex((5,))))
+
+    # Multiple dimensions
+    assert_equal(list(np.ndindex(2, 3)), list(np.ndindex((2, 3))))
+
+    # Complex shape
+    shape = (2, 1, 4)
+    assert_equal(list(np.ndindex(*shape)), list(np.ndindex(shape)))
+
+
+def test_ndindex_against_ndenumerate_compatibility():
+    """Test ndindex produces same indices as ndenumerate."""
+    for shape in [(1, 2, 3), (3,), (2, 2), ()]:
+        ndindex_result = list(np.ndindex(shape))
+        ndenumerate_indices = [ix for ix, _ in np.ndenumerate(np.zeros(shape))]
+        assert_array_equal(ndindex_result, ndenumerate_indices)
+
+
+def test_ndindex_multidimensional_correctness():
+    """Test ndindex produces correct indices for multidimensional arrays."""
+    shape = (2, 1, 3)
+    result = list(np.ndindex(*shape))
+    expected = [
+        (0, 0, 0),
+        (0, 0, 1),
+        (0, 0, 2),
+        (1, 0, 0),
+        (1, 0, 1),
+        (1, 0, 2),
+    ]
+    assert_equal(result, expected)
+
+
+def test_ndindex_large_dimensions_behavior():
+    """Test ndindex behaves correctly when initialized with large dimensions."""
+    large_shape = (1000, 1000)
+    iter_obj = np.ndindex(*large_shape)
+    first_element = next(iter_obj)
+    assert_equal(first_element, (0, 0))
+
+
+def test_ndindex_empty_iterator_behavior():
+    """Test detailed behavior of empty iterators."""
+    empty_iter = np.ndindex(0, 5)
+    assert_equal(list(empty_iter), [])
+
+    empty_iter2 = np.ndindex(3, 0, 2)
+    with pytest.raises(StopIteration):
+        next(empty_iter2)
+
+
+@pytest.mark.parametrize(
+    "negative_shape_arg",
+    [
+        (-1,),  # Single negative dimension
+        (2, -3, 4),  # Negative dimension in the middle
+        (5, 0, -2),  # Mix of valid (0) and invalid (negative) dimensions
+    ],
+)
+def test_ndindex_negative_dimensions(negative_shape_arg):
+    """Test that negative dimensions raise ValueError."""
+    with pytest.raises(ValueError):
+        ndindex(negative_shape_arg)
+
+
+def test_ndindex_empty_shape():
+    # ndindex() and ndindex(()) should return a single empty tuple
+    assert list(np.ndindex()) == [()]
+    assert list(np.ndindex(())) == [()]
+
+
+def test_ndindex_negative_dim_raises():
+    # ndindex(-1) should raise a ValueError
+    with pytest.raises(ValueError):
+        list(np.ndindex(-1))
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 79fca0dd690b..4051e203dacf 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -7,6 +7,7 @@
 import threading
 import time
 import warnings
+import zipfile
 from ctypes import c_bool
 from datetime import datetime
 from io import BytesIO, StringIO
@@ -35,9 +36,7 @@
     assert_no_warnings,
     assert_raises,
     assert_raises_regex,
-    assert_warns,
     break_cycles,
-    suppress_warnings,
     tempdir,
     temppath,
 )
@@ -126,8 +125,6 @@ def roundtrip(self, save_func, *args, **kwargs):
 
             arr_reloaded = np.load(load_file, **load_kwds)
 
-            self.arr = arr
-            
self.arr_reloaded = arr_reloaded finally: if not isinstance(target_file, BytesIO): target_file.close() @@ -136,6 +133,8 @@ def roundtrip(self, save_func, *args, **kwargs): if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): os.remove(target_file.name) + return arr, arr_reloaded + def check_roundtrips(self, a): self.roundtrip(a) self.roundtrip(a, file_on_disk=True) @@ -196,30 +195,47 @@ def test_format_2_0(self): class TestSaveLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.save, *args, **kwargs) - assert_equal(self.arr[0], self.arr_reloaded) - assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) - assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + arr, arr_reloaded = RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(arr[0], arr_reloaded) + assert_equal(arr[0].dtype, arr_reloaded.dtype) + assert_equal(arr[0].flags.fnc, arr_reloaded.flags.fnc) class TestSavezLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + arr, arr_reloaded = RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) try: - for n, arr in enumerate(self.arr): - reloaded = self.arr_reloaded['arr_%d' % n] - assert_equal(arr, reloaded) - assert_equal(arr.dtype, reloaded.dtype) - assert_equal(arr.flags.fnc, reloaded.flags.fnc) + for n, a in enumerate(arr): + reloaded = arr_reloaded['arr_%d' % n] + assert_equal(a, reloaded) + assert_equal(a.dtype, reloaded.dtype) + assert_equal(a.flags.fnc, reloaded.flags.fnc) finally: # delete tempfile, must be done here on windows - if self.arr_reloaded.fid: - self.arr_reloaded.fid.close() - os.remove(self.arr_reloaded.fid.name) + if arr_reloaded.fid: + arr_reloaded.fid.close() + os.remove(arr_reloaded.fid.name) + + def test_load_non_npy(self): + """Test loading non-.npy files and name mapping in .npz.""" + with temppath(prefix="numpy_test_npz_load_non_npy_", suffix=".npz") as tmp: + with zipfile.ZipFile(tmp, "w") as npz: + with npz.open("test1.npy", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("test2", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("metadata", "w") as out_file: + out_file.write(b"Name: Test") + with np.load(tmp) as npz: + assert len(npz["test1"]) == 10 + assert len(npz["test1.npy"]) == 10 + assert len(npz["test2"]) == 10 + assert npz["metadata"] == b"Name: Test" @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) @@ -319,8 +335,9 @@ def test_closing_fid(self): # goes to zero. Python running in debug mode raises a # ResourceWarning when file closing is left to the garbage # collector, so we catch the warnings. 
- with suppress_warnings() as sup: - sup.filter(ResourceWarning) # TODO: specify exact message + with warnings.catch_warnings(): + # TODO: specify exact message + warnings.simplefilter('ignore', ResourceWarning) for i in range(1, 1025): try: np.load(tmp)["data"] @@ -614,6 +631,7 @@ def test_unicode_and_bytes_fmt(self, iotype): @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work") @pytest.mark.slow @requires_memory(free_bytes=7e9) + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_zip(self): def check_large_zip(memoryerror_raised): memoryerror_raised.value = False @@ -645,7 +663,8 @@ def check_large_zip(memoryerror_raised): raise MemoryError("Child process raised a MemoryError exception") # -9 indicates a SIGKILL, probably an OOM. if p.exitcode == -9: - pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + msg = "subprocess got a SIGKILL, apparently free memory was not sufficient" + pytest.xfail(msg) assert p.exitcode == 0 class LoadTxtBase: @@ -824,8 +843,6 @@ def test_comments_multiple(self): a = np.array([[1, 2, 3], [4, 5, 6]], int) assert_array_equal(x, a) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_comments_multi_chars(self): c = TextIO() c.write('/* comment\n1,2,3,5\n') @@ -1042,8 +1059,6 @@ def test_from_float_hex(self): c, dtype=dt, converters=float.fromhex, encoding="latin1") assert_equal(res, tgt, err_msg=f"{dt}") - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_default_float_converter_no_default_hex_conversion(self): """ Ensure that fromhex is only used for values with the correct prefix and @@ -1054,8 +1069,6 @@ def test_default_float_converter_no_default_hex_conversion(self): match=".*convert string 'a' to float64 at row 0, column 1"): np.loadtxt(c) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_default_float_converter_exception(self): """ Ensure that the exception message raised during failed floating point @@ -1259,12 +1272,16 @@ def test_max_rows_larger(self): (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]), # "Bad" lines that do not end in newlines: (1, ["ignored", "1,2", "", "3,4"]), - (1, StringIO("ignored\n1,2\n\n3,4")), + (1, lambda: StringIO("ignored\n1,2\n\n3,4")), # Same as above, but do not skip any lines: (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]), (0, ["-1,0", "1,2", "", "3,4"]), - (0, StringIO("-1,0\n1,2\n\n3,4"))]) + (0, lambda: StringIO("-1,0\n1,2\n\n3,4"))]) def test_max_rows_empty_lines(self, skip, data): + # gh-26718 re-instantiate StringIO objects each time + if callable(data): + data = data() + with pytest.warns(UserWarning, match=f"Input line 3.*max_rows={3 - skip}"): res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", @@ -1428,8 +1445,8 @@ def test_skip_footer(self): assert_equal(test, ctrl) def test_skip_footer_with_invalid(self): - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' # Footer too small to get rid of all invalid values assert_raises(ValueError, np.genfromtxt, @@ -1673,7 +1690,8 @@ def test_dtype_with_converters_and_usecols(self): conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', names=None, converters=conv, 
encoding="bytes") - control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], dtype=dtyp) + control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], + dtype=dtyp) assert_equal(test, control) dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', @@ -1826,8 +1844,8 @@ def test_usecols_with_named_columns(self): def test_empty_file(self): # Test that an empty file raises the proper warning. - with suppress_warnings() as sup: - sup.filter(message="genfromtxt: Empty input file:") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message="genfromtxt: Empty input file:") data = TextIO() test = np.genfromtxt(data) assert_equal(test, np.array([])) @@ -1886,7 +1904,8 @@ def test_user_missing_values(self): # basekwargs['dtype'] = mdtype test = np.genfromtxt(TextIO(data), - missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + missing_values={0: -9, 1: -99, 2: -999j}, + usemask=True, **basekwargs) control = ma.array([(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)], mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], @@ -1971,7 +1990,7 @@ def test_invalid_raise(self): def f(): return np.genfromtxt(mdata, invalid_raise=False, **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) # @@ -1992,7 +2011,7 @@ def test_invalid_raise_with_usecols(self): def f(): return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) # @@ -2404,8 +2423,8 @@ def test_max_rows(self): assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) # Test with invalid not raise - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) @@ -2549,7 +2568,7 @@ def test_squeeze_scalar(self): @pytest.mark.parametrize("ndim", [0, 1, 2]) def test_ndmin_keyword(self, ndim: int): - # lets have the same behaviour of ndmin as loadtxt + # let's have the same behaviour of ndmin as loadtxt # as they should be the same for non-missing values txt = "42" @@ -2783,6 +2802,7 @@ def test_npzfile_dict(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage collector is global state") def test_load_refcount(): # Check that objects returned by np.load are directly freed based on # their refcount, rather than needing the gc to collect them. 
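
# [Editor's note, not part of the patch] These test_io.py changes follow the
# same migration applied across the test suite: numpy.testing's
# suppress_warnings/assert_warns helpers are replaced by the stdlib warnings
# module for silencing and by pytest.warns for asserting. A minimal sketch of
# both halves of the pattern; the `noisy` helper is hypothetical, for
# illustration only.
import warnings

import pytest


def noisy():
    warnings.warn("Converting input from bool", RuntimeWarning)
    return 42


# Silencing (replaces `with suppress_warnings() as sup: sup.filter(...)`):
with warnings.catch_warnings():
    warnings.simplefilter('ignore', RuntimeWarning)
    assert noisy() == 42

# Asserting that a warning is emitted (replaces assert_warns/sup.record):
with pytest.warns(RuntimeWarning, match="Converting input"):
    noisy()
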
diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index a2022a0d5175..a164bf38f189 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -13,7 +13,7 @@ import numpy as np from numpy.ma.testutils import assert_equal -from numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal +from numpy.testing import HAS_REFCOUNT, assert_array_equal def test_scientific_notation(): @@ -40,10 +40,9 @@ def test_comment_multiple_chars(comment): assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]]) -@pytest.fixture def mixed_types_structured(): """ - Fixture providing heterogeneous input data with a structured dtype, along + Function providing heterogeneous input data with a structured dtype, along with the associated structured array. """ data = StringIO( @@ -74,15 +73,14 @@ def mixed_types_structured(): @pytest.mark.parametrize('skiprows', [0, 1, 2, 3]) -def test_structured_dtype_and_skiprows_no_empty_lines( - skiprows, mixed_types_structured): - data, dtype, expected = mixed_types_structured +def test_structured_dtype_and_skiprows_no_empty_lines(skiprows): + data, dtype, expected = mixed_types_structured() a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows) assert_array_equal(a, expected[skiprows:]) -def test_unpack_structured(mixed_types_structured): - data, dtype, expected = mixed_types_structured +def test_unpack_structured(): + data, dtype, expected = mixed_types_structured() a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True) assert_array_equal(a, expected["f0"]) @@ -206,8 +204,6 @@ def test_maxrows_no_blank_lines(dtype): assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) def test_exception_message_bad_values(dtype): txt = StringIO("1,2\n3,XXX\n5,6") @@ -395,8 +391,6 @@ def test_bool(): assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_integer_signs(dtype): @@ -413,8 +407,6 @@ def test_integer_signs(dtype): np.loadtxt([f"{sign}2\n"], dtype=dtype) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_implicit_cast_float_to_int_fails(dtype): @@ -485,8 +477,6 @@ def conv(x): assert sys.getrefcount(sentinel) == 2 -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_character_not_bytes_compatible(): """Test exception when a character cannot be encoded as 'S'.""" data = StringIO("–") # == \u2013 @@ -504,8 +494,6 @@ def test_invalid_converter(conv): np.loadtxt(StringIO("1 2\n3 4"), converters=conv) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_converters_dict_raises_non_integer_key(): with pytest.raises(TypeError, match="keys of the converters dict"): np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) @@ -571,8 +559,6 @@ def test_quote_support_default(): assert_array_equal(res, 
expected) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_quotechar_multichar_error(): txt = StringIO("1,2\n3,4") msg = r".*must be a single unicode character or None" @@ -732,8 +718,6 @@ def test_unicode_whitespace_stripping_complex(dtype): assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2)) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", "FD") @pytest.mark.parametrize("field", ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"]) @@ -742,8 +726,6 @@ def test_bad_complex(dtype, field): np.loadtxt([field + "\n"], dtype=dtype, delimiter=",") -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"] + "efgdFDG" + "?") def test_nul_character_error(dtype): @@ -755,8 +737,6 @@ def test_nul_character_error(dtype): np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"') -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"] + "efgdFDG" + "?") def test_no_thousands_support(dtype): @@ -1024,8 +1004,6 @@ def test_str_dtype_unit_discovery_with_converter(): assert_equal(a, expected) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_control_character_empty(): with pytest.raises(TypeError, match="Text reading control character must"): np.loadtxt(StringIO("1 2 3"), delimiter="") diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 89a6d1f95fed..6ef86bf84ee0 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -15,7 +15,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) # Test data @@ -281,8 +280,9 @@ def test_mutation(self): def test_result_values(self): for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): for row in _ndat: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in", RuntimeWarning) ind = f(row) val = row[ind] # comparing with NaN is tricky as the result @@ -491,10 +491,10 @@ def test_dtype_from_dtype(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type assert_(res is tgt) @@ -508,10 +508,10 @@ def test_dtype_from_char(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=c, axis=1).dtype.type res = nf(mat, dtype=c, axis=1).dtype.type assert_(res is tgt) @@ -729,16 +729,16 @@ def test_ddof_too_big(self): dsize = [len(d) for d in 
_rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - sup.filter(ComplexWarning) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + warnings.simplefilter('ignore', ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) - if any(tgt): - assert_(len(sup.log) == 1) - else: - assert_(len(sup.log) == 0) + if any(tgt): + assert_(len(w) == 1) + else: + assert_(len(w) == 0) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) @@ -860,8 +860,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = np.nanmedian(d, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanmedian(d, axis=(0, 1), keepdims=True) @@ -946,17 +946,15 @@ def test_result_values(self): @pytest.mark.parametrize("dtype", _TYPE_CODES) def test_allnans(self, dtype, axis): mat = np.full((3, 3), np.nan).astype(dtype) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - + with pytest.warns(RuntimeWarning) as r: output = np.nanmedian(mat, axis=axis) assert output.dtype == mat.dtype assert np.isnan(output).all() if axis is None: - assert_(len(sup.log) == 1) + assert_(len(r) == 1) else: - assert_(len(sup.log) == 3) + assert_(len(r) == 3) # Check scalar scalar = np.array(np.nan).astype(dtype)[()] @@ -965,9 +963,9 @@ def test_allnans(self, dtype, axis): assert np.isnan(output_scalar) if axis is None: - assert_(len(sup.log) == 2) + assert_(len(r) == 2) else: - assert_(len(sup.log) == 4) + assert_(len(r) == 4) def test_empty(self): mat = np.zeros((0, 3)) @@ -995,8 +993,8 @@ def test_extended_axis_invalid(self): assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) def test_float_special(self): - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for inf in [np.inf, -np.inf]: a = np.array([[inf, np.nan], [np.nan, np.nan]]) assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) @@ -1063,8 +1061,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = np.nanpercentile(d, 90, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) @@ -1233,8 +1231,9 @@ def test_multiple_percentiles(self): large_mat[:, :, 3:] *= 2 for axis in [None, 0, 1]: for keepdim in [False, True]: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "All-NaN slice encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "All-NaN slice encountered", RuntimeWarning) val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) nan_val = np.nanpercentile(nan_mat, perc, axis=axis, keepdims=keepdim) @@ -1423,6 +1422,7 @@ def test__replace_nan(): assert np.isnan(arr_nan[-1]) +@pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") def test_memmap_takes_fast_route(tmpdir): # We want memory mapped arrays to take the fast route through nanmax, # which avoids 
creating a mask by using fmax.reduce (see gh-28721). So we diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index c173ac321d74..32547f8e6c18 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -61,7 +61,8 @@ def test_poly1d_math(self): assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) assert_equal(p + q, np.poly1d([4., 4., 4.])) assert_equal(p - q, np.poly1d([-2., 0., 2.])) - assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., + 312., 324., 216., 81.])) assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) assert_equal(p.deriv(), np.poly1d([2., 2.])) @@ -131,12 +132,16 @@ def test_roots(self): for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1, i]) res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) - assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1.01, i]) res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) - assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) def test_str_leading_zeros(self): p = np.poly1d([4, 3, 2, 1]) @@ -249,8 +254,8 @@ def test_complex(self): def test_integ_coeffs(self): p = np.poly1d([3, 2, 1]) p2 = p.integ(3, k=[9, 7, 6]) - assert_( - (p2.coeffs == [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. 
/ 2., 7, 6]).all()) + expected = [1 / 4 / 5, 1 / 3 / 4, 1 / 2 / 3, 9 / 1 / 2, 7, 6] + assert_((p2.coeffs == expected).all()) def test_zero_dims(self): try: diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index eee1f47f834f..b9cc266a9363 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -1,3 +1,4 @@ +import pytest import numpy as np import numpy.ma as ma @@ -31,19 +32,14 @@ class TestRecFunctions: # Misc tests - - def setup_method(self): + def test_zip_descr(self): + # Test zip_descr x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_zip_descr(self): - # Test zip_descr - (w, x, y, z) = self.data # Std array test = zip_descr((x, x), flatten=True) @@ -240,6 +236,7 @@ def test_repack_fields(self): dt = np.dtype((np.record, dt)) assert_(repack_fields(dt).type is np.record) + @pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") def test_structured_to_unstructured(self, tmp_path): a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) out = structured_to_unstructured(a) @@ -448,7 +445,7 @@ def test_masked_flexible(self): class TestMergeArrays: # Test merge_arrays - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( @@ -456,11 +453,11 @@ def setup_method(self): w = np.array( [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) - self.data = (w, x, y, z) + return w, x, y, z def test_solo(self): # Test merge_arrays on a single array. 
- (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = merge_arrays(x) control = np.array([(1,), (2,)], dtype=[('f0', int)]) @@ -475,7 +472,7 @@ def test_solo(self): def test_solo_w_flatten(self): # Test merge_arrays on a single array w & w/o flattening - w = self.data[0] + w = self._create_arrays()[0] test = merge_arrays(w, flatten=False) assert_equal(test, w) @@ -487,7 +484,7 @@ def test_solo_w_flatten(self): def test_standard(self): # Test standard & standard # Test merge arrays - (_, x, y, _) = self.data + _, x, y, _ = self._create_arrays() test = merge_arrays((x, y), usemask=False) control = np.array([(1, 10), (2, 20), (-1, 30)], dtype=[('f0', int), ('f1', int)]) @@ -502,7 +499,7 @@ def test_standard(self): def test_flatten(self): # Test standard & flexible - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = merge_arrays((x, z), flatten=True) control = np.array([(1, 'A', 1.), (2, 'B', 2.)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) @@ -516,7 +513,7 @@ def test_flatten(self): def test_flatten_wflexible(self): # Test flatten standard & nested - (w, x, _, _) = self.data + w, x, _, _ = self._create_arrays() test = merge_arrays((x, w), flatten=True) control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], dtype=[('f0', int), @@ -524,16 +521,15 @@ def test_flatten_wflexible(self): assert_equal(test, control) test = merge_arrays((x, w), flatten=False) - controldtype = [('f0', int), - ('f1', [('a', int), - ('b', [('ba', float), ('bb', int), ('bc', [])])])] + f1_descr = [('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])] + controldtype = [('f0', int), ('f1', f1_descr)] control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], dtype=controldtype) assert_equal(test, control) def test_wmasked_arrays(self): # Test merge_arrays masked arrays - (_, x, _, _) = self.data + x = self._create_arrays()[1] mx = ma.array([1, 2, 3], mask=[1, 0, 0]) test = merge_arrays((x, mx), usemask=True) control = ma.array([(1, 1), (2, 2), (-1, 3)], @@ -555,7 +551,7 @@ def test_w_singlefield(self): def test_w_shorter_flex(self): # Test merge_arrays w/ a shorter flexndarray. 
- z = self.data[-1] + z = self._create_arrays()[-1] # Fixme, this test looks incomplete and broken #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) @@ -568,7 +564,7 @@ def test_w_shorter_flex(self): dtype=[('A', '|S3'), ('B', float), ('C', int)]) def test_singlerecord(self): - (_, x, y, z) = self.data + _, x, y, z = self._create_arrays() test = merge_arrays((x[0], y[0], z[0]), usemask=False) control = np.array([(1, 10, ('A', 1))], dtype=[('f0', int), @@ -580,18 +576,18 @@ def test_singlerecord(self): class TestAppendFields: # Test append_fields - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) + return w, x, y, z def test_append_single(self): # Test simple case - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = append_fields(x, 'A', data=[10, 20, 30]) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], @@ -600,7 +596,7 @@ def test_append_single(self): def test_append_double(self): # Test simple case - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], @@ -609,7 +605,7 @@ def test_append_double(self): def test_append_on_flex(self): # Test append_fields on flexible type arrays - z = self.data[-1] + z = self._create_arrays()[-1] test = append_fields(z, 'C', data=[10, 20, 30]) control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], @@ -618,7 +614,7 @@ def test_append_on_flex(self): def test_append_on_nested(self): # Test append_fields on nested fields - w = self.data[0] + w = self._create_arrays()[0] test = append_fields(w, 'C', data=[10, 20, 30]) control = ma.array([(1, (2, 3.0), 10), (4, (5, 6.0), 20), @@ -633,18 +629,18 @@ def test_append_on_nested(self): class TestStackArrays: # Test stack_arrays - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) + return w, x, y, z def test_solo(self): # Test stack_arrays on single arrays - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = stack_arrays((x,)) assert_equal(test, x) assert_(test is x) @@ -655,7 +651,7 @@ def test_solo(self): def test_unnamed_fields(self): # Tests combinations of arrays w/o named fields - (_, x, y, _) = self.data + _, x, y, _ = self._create_arrays() test = stack_arrays((x, x), usemask=False) control = np.array([1, 2, 1, 2]) @@ -671,7 +667,7 @@ def test_unnamed_fields(self): def test_unnamed_and_named_fields(self): # Test combination of arrays w/ & w/o named fields - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = stack_arrays((x, z)) control = ma.array([(1, -1, -1), (2, -1, -1), @@ -703,7 +699,7 @@ def test_unnamed_and_named_fields(self): def test_matching_named_fields(self): # Test combination of arrays w/ matching field names - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', 
float)]) test = stack_arrays((z, zz)) @@ -731,7 +727,7 @@ def test_matching_named_fields(self): def test_defaults(self): # Test defaults: no exception raised if keys of defaults are not fields. - (_, _, _, z) = self.data + z = self._create_arrays()[-1] zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} @@ -803,18 +799,18 @@ def test_subdtype(self): class TestJoinBy: - def setup_method(self): - self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + def _create_arrays(self): + a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) - self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('d', int)]) + return a, b def test_inner_join(self): # Basic test of join_by - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by('a', a, b, jointype='inner') control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), @@ -824,8 +820,7 @@ def test_inner_join(self): assert_equal(test, control) def test_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() # Fixme, this test is broken #test = join_by(('a', 'b'), a, b) #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), @@ -834,7 +829,6 @@ def test_join(self): # dtype=[('a', int), ('b', int), # ('c', int), ('d', int)]) #assert_equal(test, control) - join_by(('a', 'b'), a, b) np.array([(5, 55, 105, 100), (6, 56, 106, 101), (7, 57, 107, 102), (8, 58, 108, 103), @@ -852,8 +846,7 @@ def test_join_subdtype(self): assert_equal(res, bar.view(ma.MaskedArray)) def test_outer_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'outer') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), @@ -880,8 +873,7 @@ def test_outer_join(self): assert_equal(test, control) def test_leftouter_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'leftouter') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), @@ -1030,19 +1022,17 @@ def test_two_keys_two_vars(self): assert_equal(test.dtype, control.dtype) assert_equal(test, control) + class TestAppendFieldsObj: """ Test append_fields with arrays containing objects """ # https://github.com/numpy/numpy/issues/2346 - def setup_method(self): - from datetime import date - self.data = {'obj': date(2000, 1, 1)} - def test_append_to_objects(self): "Test append_fields when the base array contains objects" - obj = self.data['obj'] + from datetime import date + obj = date(2000, 1, 1) x = np.array([(obj, 1.), (obj, 2.)], dtype=[('A', object), ('B', float)]) y = np.array([10, 20], dtype=int) diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index fe40c953a147..fb654b4cfb85 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -16,7 +16,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -593,9 +592,9 @@ def test_writeable(): for array_is_broadcast, result in zip(is_broadcast, results): # This will change to False in a future version if array_is_broadcast: - with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): 
assert_equal(result.flags.writeable, True) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): result[:] = 0 # Warning not emitted, writing to the array resets it assert_equal(result.flags.writeable, True) diff --git a/numpy/lib/user_array.pyi b/numpy/lib/user_array.pyi index 9b90d893326b..af90126ad6c9 100644 --- a/numpy/lib/user_array.pyi +++ b/numpy/lib/user_array.pyi @@ -1 +1 @@ -from ._user_array_impl import container as container +from ._user_array_impl import container as container # type: ignore[deprecated] diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py index fa230ece580c..cc482cfc9579 100644 --- a/numpy/linalg/__init__.py +++ b/numpy/linalg/__init__.py @@ -84,10 +84,7 @@ """ # To get sub-modules -from . import ( - _linalg, - linalg, # deprecated in NumPy 2.0 -) +from . import _linalg from ._linalg import * __all__ = _linalg.__all__.copy() # noqa: PLE0605 diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 16c8048c1a11..4d6aca760637 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,6 +1,4 @@ -from . import _linalg as _linalg -from . import _umath_linalg as _umath_linalg -from . import linalg as linalg +from . import _linalg as _linalg, _umath_linalg as _umath_linalg from ._linalg import ( cholesky, cond, diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index d7850c4a0204..00e485346577 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -35,7 +35,9 @@ cdouble, complexfloating, count_nonzero, + cross as _core_cross, csingle, + diagonal as _core_diagonal, divide, dot, double, @@ -49,10 +51,13 @@ intp, isfinite, isnan, + matmul as _core_matmul, + matrix_transpose as _core_matrix_transpose, moveaxis, multiply, newaxis, object_, + outer as _core_outer, overrides, prod, reciprocal, @@ -62,34 +67,11 @@ sqrt, sum, swapaxes, - zeros, -) -from numpy._core import ( - cross as _core_cross, -) -from numpy._core import ( - diagonal as _core_diagonal, -) -from numpy._core import ( - matmul as _core_matmul, -) -from numpy._core import ( - matrix_transpose as _core_matrix_transpose, -) -from numpy._core import ( - outer as _core_outer, -) -from numpy._core import ( tensordot as _core_tensordot, -) -from numpy._core import ( trace as _core_trace, -) -from numpy._core import ( transpose as _core_transpose, -) -from numpy._core import ( vecdot as _core_vecdot, + zeros, ) from numpy._globals import _NoValue from numpy._typing import NDArray @@ -335,8 +317,7 @@ def tensorsolve(a, b, axes=None): Examples -------- >>> import numpy as np - >>> a = np.eye(2*3*4) - >>> a.shape = (2*3, 4, 2, 3, 4) + >>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4)) >>> rng = np.random.default_rng() >>> b = rng.normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) @@ -513,8 +494,7 @@ def tensorinv(a, ind=2): Examples -------- >>> import numpy as np - >>> a = np.eye(4*6) - >>> a.shape = (4, 6, 8, 3) + >>> a = np.eye(4*6).reshape((4, 6, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) @@ -523,8 +503,7 @@ def tensorinv(a, ind=2): >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True - >>> a = np.eye(4*6) - >>> a.shape = (24, 8, 3) + >>> a = np.eye(4*6).reshape((24, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) @@ -959,7 +938,7 @@ def outer(x1, x2, /): An example using a "vector" of letters: - >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) >>> np.linalg.outer(x, [1, 2, 3]) 
array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], @@ -1016,9 +995,6 @@ def qr(a, mode='reduced'): Returns ------- - When mode is 'reduced' or 'complete', the result will be a namedtuple with - the attributes `Q` and `R`. - Q : ndarray of float or complex, optional A matrix with orthonormal columns. When mode = 'complete' the result is an orthogonal/unitary matrix depending on whether or not @@ -1047,6 +1023,9 @@ def qr(a, mode='reduced'): Notes ----- + When mode is 'reduced' or 'complete', the result will be a namedtuple with + the attributes ``Q`` and ``R``. + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, ``dorgqr``, and ``zungqr``. @@ -1717,9 +1696,6 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Returns ------- - When `compute_uv` is True, the result is a namedtuple with the following - attribute names: - U : { (..., M, M), (..., M, K) } array Unitary array(s). The first ``a.ndim - 2`` dimensions have the same size as those of the input `a`. The size of the last two dimensions @@ -1747,6 +1723,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Notes ----- + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: `U`, `S`, and `Vh`. + The decomposition is performed using LAPACK routine ``_gesdd``. SVD is usually described for the factorization of a 2D matrix :math:`A`. @@ -1785,7 +1764,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): ((9, 9), (6,), (6, 6)) >>> np.allclose(a, np.dot(U[:, :6] * S, Vh)) True - >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat = np.zeros((9, 6), dtype=np.complex128) >>> smat[:6, :6] = np.diag(S) >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) True @@ -2029,6 +2008,7 @@ def cond(x, p=None): # contain nans in the entries where inversion failed. _assert_stacked_square(x) t, result_t = _commonType(x) + result_t = _realType(result_t) # condition number is always real signature = 'D->D' if isComplexType(t) else 'd->d' with errstate(all='ignore'): invx = _umath_linalg.inv(x, signature=signature) @@ -2036,18 +2016,14 @@ def cond(x, p=None): r = r.astype(result_t, copy=False) # Convert nans to infs unless the original array had nan entries - r = asarray(r) nan_mask = isnan(r) if nan_mask.any(): nan_mask &= ~isnan(x).any(axis=(-2, -1)) if r.ndim > 0: r[nan_mask] = inf elif nan_mask: - r[()] = inf - - # Convention is to return scalars instead of 0d arrays - if r.ndim == 0: - r = r[()] + # Convention is to return scalars instead of 0d arrays. + r = r.dtype.type(inf) return r @@ -2093,9 +2069,9 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): The default threshold to detect rank deficiency is a test on the magnitude of the singular values of `A`. By default, we identify singular values less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency - (with the symbols defined above). This is the algorithm MATLAB uses [1]. + (with the symbols defined above). This is the algorithm MATLAB uses [1]_. It also appears in *Numerical recipes* in the discussion of SVD solutions - for linear least squares [2]. + for linear least squares [2]_. This default threshold is designed to detect rank deficiency accounting for the numerical errors of the SVD computation. Imagine that there @@ -2962,7 +2938,7 @@ def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1] Assume we have three matrices - :math:`A_{10 \times 100}, B_{100 \times 5}, C_{5 \times 50}`. + :math:`A_{10 \\times 100}, B_{100 \\times 5}, C_{5 \\times 50}`. 
The costs for the two different parenthesizations are as follows:: diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 3f318a892da5..af876d911826 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,37 +1,36 @@ -from collections.abc import Iterable +from collections.abc import Iterable, Sequence from typing import ( Any, + Generic, + Literal as L, NamedTuple, Never, + Protocol, SupportsIndex, SupportsInt, - TypeAlias, - TypeVar, overload, + type_check_only, ) -from typing import Literal as L +from typing_extensions import TypeVar import numpy as np from numpy import ( - complex128, complexfloating, float64, - # other floating, - int32, object_, signedinteger, timedelta64, unsignedinteger, - # re-exports vecdot, ) from numpy._core.fromnumeric import matrix_transpose -from numpy._core.numeric import tensordot +from numpy._globals import _NoValue, _NoValueType from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, @@ -40,6 +39,11 @@ from numpy._typing import ( _ArrayLikeObject_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _ComplexLike_co, + _DTypeLike, + _NestedSequence, + _Shape, + _ShapeLike, ) from numpy.linalg import LinAlgError @@ -78,362 +82,735 @@ __all__ = [ "vecdot", ] -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast2D = tuple[int, int, *tuple[int, ...]] +type _AtLeast3D = tuple[int, int, int, *tuple[int, ...]] +type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] +type _JustAnyShape = tuple[Never, ...] # workaround for microsoft/pyright#10232 + +type _tuple2[T] = tuple[T, T] + +type _inexact32 = np.float32 | np.complex64 +type _to_float64 = np.float64 | np.integer | np.bool +type _to_inexact64 = np.complex128 | _to_float64 +type _to_complex = np.number | np.bool + +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] + +# anything that safe-casts (from floating) into float64/complex128 +type _ToArrayF64 = _ArrayLike[_to_float64] | _NestedSequence[float] +type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] +# the invariant `list` type avoids overlap with bool, int, etc +type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] +type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] + +type _ToArrayF64_2d = _Array2D[_to_float64] | Sequence[Sequence[float]] +type _ToArrayF64_3nd = _Array3ND[_to_float64] | Sequence[Sequence[_NestedSequence[float]]] +type _ToArrayC128_2d = _Array2D[_to_inexact64] | Sequence[Sequence[complex]] +type _ToArrayC128_3nd = _Array3ND[_to_inexact64] | Sequence[Sequence[_NestedSequence[complex]]] + +type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` +type _SideKind = L["L", "U", "l", "u"] +type _NonNegInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] + +type _LstSqResult[ShapeT: tuple[int, ...], InexactT: np.inexact, FloatingT: np.floating] = tuple[ + np.ndarray[ShapeT, np.dtype[InexactT]], # least-squares solution + np.ndarray[tuple[int], np.dtype[FloatingT]], # residuals + np.int32, # rank + np.ndarray[tuple[int], np.dtype[FloatingT]], # singular values +] + +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, 
covariant=True) +_FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | NDArray[np.floating], default=Any, covariant=True) +_InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) +_InexactOrArrayT_co = TypeVar("_InexactOrArrayT_co", bound=np.inexact | NDArray[np.inexact], default=Any, covariant=True) -_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] +# shape-typed variant of numpy._typing._SupportsArray +@type_check_only +class _SupportsArray[ShapeT: _Shape, DTypeT: np.dtype](Protocol): + def __array__(self, /) -> np.ndarray[ShapeT, DTypeT]: ... ### fortran_int = np.intc -class EigResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] - -class EighResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] - -class QRResult(NamedTuple): - Q: NDArray[Any] - R: NDArray[Any] - -class SlogdetResult(NamedTuple): - # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and - # a `(x.ndim - 2)`` dimensionl arrays otherwise - sign: Any - logabsdet: Any - -class SVDResult(NamedTuple): - U: NDArray[Any] - S: NDArray[Any] - Vh: NDArray[Any] - -@overload -def tensorsolve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - axes: Iterable[int] | None = ..., -) -> NDArray[float64]: ... -@overload -def tensorsolve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - axes: Iterable[int] | None = ..., -) -> NDArray[floating]: ... -@overload -def tensorsolve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axes: Iterable[int] | None = ..., -) -> NDArray[complexfloating]: ... - -@overload -def solve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, -) -> NDArray[float64]: ... -@overload -def solve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, -) -> NDArray[floating]: ... -@overload -def solve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... - -@overload -def tensorinv( - a: _ArrayLikeInt_co, - ind: int = ..., -) -> NDArray[float64]: ... -@overload -def tensorinv( - a: _ArrayLikeFloat_co, - ind: int = ..., -) -> NDArray[floating]: ... -@overload -def tensorinv( +# NOTE: These named tuple types are only generic when `typing.TYPE_CHECKING` + +class EigResult(NamedTuple, Generic[_InexactT_co]): + eigenvalues: NDArray[_InexactT_co] + eigenvectors: NDArray[_InexactT_co] + +class EighResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + eigenvalues: NDArray[_FloatingT_co] + eigenvectors: NDArray[_InexactT_co] + +class QRResult(NamedTuple, Generic[_InexactT_co]): + Q: NDArray[_InexactT_co] + R: NDArray[_InexactT_co] + +class SVDResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + U: NDArray[_InexactT_co] + S: NDArray[_FloatingT_co] + Vh: NDArray[_InexactT_co] + +class SlogdetResult(NamedTuple, Generic[_FloatingOrArrayT_co, _InexactOrArrayT_co]): + sign: _FloatingOrArrayT_co + logabsdet: _InexactOrArrayT_co + +# keep in sync with `solve` +@overload # ~float64, +float64 +def tensorsolve(a: _ToArrayF64, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def tensorsolve(a: _ArrayLikeFloat_co, b: _ToArrayF64, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # ~float32, ~float32 +def tensorsolve(a: _ArrayLike[np.float32], b: _ArrayLike[np.float32], axes: Iterable[int] | None = None) -> NDArray[np.float32]: ... 
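The overload ladder above (mirrored by `solve` just below) only describes NumPy's existing result-type promotion; nothing changes at runtime. A doctest-style sketch of the behavior these stubs encode, with illustrative values that are not part of the patch:

>>> import numpy as np
>>> a32 = np.eye(2, dtype=np.float32)
>>> np.linalg.solve(a32, np.ones(2, dtype=np.float32)).dtype  # ~float32, ~float32
dtype('float32')
>>> np.linalg.solve(a32, [1.0, 2.0]).dtype  # mixed precision falls back to float64
dtype('float64')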
+@overload # +float, +float +def tensorsolve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def tensorsolve(a: _AsArrayC128, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def tensorsolve(a: _ArrayLikeComplex_co, b: _AsArrayC128, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 +def tensorsolve(a: _ArrayLike[np.complex64], b: _ArrayLike[np.complex64 | np.float32], axes: Iterable[int] | None = None) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 +def tensorsolve(a: _ArrayLike[np.complex64 | np.float32], b: _ArrayLike[np.complex64], axes: Iterable[int] | None = None) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def tensorsolve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None) -> NDArray[np.complex128 | Any]: ... + +# keep in sync with `tensorsolve` +@overload # ~float64, +float64 +def solve(a: _ToArrayF64, b: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def solve(a: _ArrayLikeFloat_co, b: _ToArrayF64) -> NDArray[np.float64]: ... +@overload # ~float32, ~float32 +def solve(a: _ArrayLike[np.float32], b: _ArrayLike[np.float32]) -> NDArray[np.float32]: ... +@overload # +float, +float +def solve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def solve(a: _AsArrayC128, b: _ArrayLikeComplex_co) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def solve(a: _ArrayLikeComplex_co, b: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 +def solve(a: _ArrayLike[np.complex64], b: _ArrayLike[np.complex64 | np.float32]) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 +def solve(a: _ArrayLike[np.complex64 | np.float32], b: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def solve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[np.complex128 | Any]: ... + +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 +def tensorinv[ScalarT: _inexact32](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... +@overload # +float64 +def tensorinv(a: _ToArrayF64, ind: int = 2) -> NDArray[np.float64]: ... +@overload # ~complex128 +def tensorinv(a: _AsArrayC128, ind: int = 2) -> NDArray[np.complex128]: ... +@overload # fallback +def tensorinv(a: _ArrayLikeComplex_co, ind: int = 2) -> np.ndarray: ... + +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 +def inv[ScalarT: _inexact32](a: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... +@overload # +float64 +def inv(a: _ToArrayF64) -> NDArray[np.float64]: ... +@overload # ~complex128 +def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # fallback +def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... + +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 +def pinv[ScalarT: _inexact32]( + a: _ArrayLike[ScalarT], + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[ScalarT]: ... 
+@overload # +float64 +def pinv( + a: _ToArrayF64, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.float64]: ... +@overload # ~complex128 +def pinv( + a: _AsArrayC128, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.complex128]: ... +@overload # fallback +def pinv( a: _ArrayLikeComplex_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., -) -> NDArray[complexfloating]: ... + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[Any]: ... -@overload -def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... -@overload -def inv(a: _ArrayLikeFloat_co) -> NDArray[floating]: ... -@overload -def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +# keep in sync with the inverse functions +@overload # inexact32 +def cholesky[ScalarT: _inexact32](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... +@overload # +float64 +def cholesky(a: _ToArrayF64, /, *, upper: bool = False) -> NDArray[np.float64]: ... +@overload # ~complex128 +def cholesky(a: _AsArrayC128, /, *, upper: bool = False) -> NDArray[np.complex128]: ... +@overload # fallback +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: ... -# TODO: The supported input and output dtypes are dependent on the value of `n`. -# For example: `n < 0` always casts integer types to float64 -def matrix_power( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - n: SupportsIndex, -) -> NDArray[Any]: ... +# NOTE: Technically this also accepts boolean array-likes, but that case is not very useful, so we skip it. +# If you have a use case for it, please open an issue. +@overload # +int, n ≥ 0 +def matrix_power(a: _NestedSequence[int], n: _NonNegInt) -> NDArray[np.int_]: ... +@overload # +integer | ~object, n ≥ 0 +def matrix_power[ScalarT: np.integer | np.object_](a: _ArrayLike[ScalarT], n: _NonNegInt) -> NDArray[ScalarT]: ... +@overload # +float64, n < 0 +def matrix_power(a: _ToArrayF64, n: _NegInt) -> NDArray[np.float64]: ... +@overload # ~float64 +def matrix_power(a: _AsArrayF64, n: SupportsIndex) -> NDArray[np.float64]: ... +@overload # ~complex128 +def matrix_power(a: _AsArrayC128, n: SupportsIndex) -> NDArray[np.complex128]: ... +@overload # ~inexact32 +def matrix_power[ScalarT: _inexact32](a: _ArrayLike[ScalarT], n: SupportsIndex) -> NDArray[ScalarT]: ... +@overload # fallback +def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> np.ndarray: ... +# TODO: narrow return types @overload -def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... -@overload -def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating]: ... -@overload -def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ... - +def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... @overload -def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never]) -> NDArray[Any]: ... +def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload -def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co) -> NDArray[np.bool]: ... +def outer[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ...
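The sign split on `n` in the `matrix_power` overloads above reflects runtime behavior: a negative power goes through the matrix inverse, so integer input comes back as float64, while a non-negative power keeps the input dtype. For example:

>>> import numpy as np
>>> a = np.array([[2, 0], [0, 2]])
>>> np.linalg.matrix_power(a, 2).dtype == a.dtype  # n >= 0 keeps the integer dtype
True
>>> np.linalg.matrix_power(a, -1).dtype            # n < 0 computes an inverse
dtype('float64')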
@overload -def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... +def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload -def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... +def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload -def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co) -> NDArray[floating]: ... +def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload -def outer( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... +def outer(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload -def outer( - x1: _ArrayLikeTD64_co, - x2: _ArrayLikeTD64_co, - out: None = ..., -) -> NDArray[timedelta64]: ... +def outer(x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload -def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co) -> NDArray[object_]: ... +def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, /) -> NDArray[object_]: ... @overload def outer( x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, -) -> _ArrayT: ... - -@overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ... -@overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ... -@overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ... - -@overload -def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... -@overload -def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloating]: ... -@overload -def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... - -@overload -def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ... -@overload -def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating]: ... + /, +) -> NDArray[Any]: ... -@overload -def eig(a: _ArrayLikeInt_co) -> EigResult: ... -@overload -def eig(a: _ArrayLikeFloat_co) -> EigResult: ... -@overload +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eig(a: NDArray[np.inexact[Never]]) -> EigResult: ... +@overload # ~complex128 +def eig(a: _AsArrayC128) -> EigResult[np.complex128]: ... +@overload # +float64 +def eig(a: _ToArrayF64) -> EigResult[np.complex128] | EigResult[np.float64]: ... +@overload # ~complex64 +def eig(a: _ArrayLike[np.complex64]) -> EigResult[np.complex64]: ... +@overload # ~float32 +def eig(a: _ArrayLike[np.float32]) -> EigResult[np.complex64] | EigResult[np.float32]: ... +@overload # fallback def eig(a: _ArrayLikeComplex_co) -> EigResult: ... -@overload -def eigh( - a: _ArrayLikeInt_co, - UPLO: L["L", "U", "l", "u"] = ..., -) -> EighResult: ... -@overload -def eigh( - a: _ArrayLikeFloat_co, - UPLO: L["L", "U", "l", "u"] = ..., -) -> EighResult: ... -@overload -def eigh( - a: _ArrayLikeComplex_co, - UPLO: L["L", "U", "l", "u"] = ..., -) -> EighResult: ... - -@overload +# +@overload # workaround for microsoft/pyright#10232 +def eigh(a: NDArray[Never], UPLO: _SideKind = "L") -> EighResult: ... +@overload # ~inexact32 +def eigh[ScalarT: _inexact32](a: _ArrayLike[ScalarT], UPLO: _SideKind = "L") -> EighResult[np.float32, ScalarT]: ... 
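The union return type in the `+float64` overload of `eig` above mirrors documented runtime behavior: for real input, the eigenvalues (and eigenvectors) are cast back to a real type only when every imaginary part is exactly zero. A quick illustration:

>>> import numpy as np
>>> np.linalg.eig(np.array([[0., 1.], [1., 0.]])).eigenvalues.dtype   # symmetric: real spectrum
dtype('float64')
>>> np.linalg.eig(np.array([[0., -1.], [1., 0.]])).eigenvalues.dtype  # rotation: complex pair
dtype('complex128')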
+@overload # +float64 +def eigh(a: _ToArrayF64, UPLO: _SideKind = "L") -> EighResult[np.float64, np.float64]: ... +@overload # ~complex128 +def eigh(a: _AsArrayC128, UPLO: _SideKind = "L") -> EighResult[np.float64, np.complex128]: ... +@overload # fallback +def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... + +# +@overload # ~inexact32, reduced|complete +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["reduced", "complete"] = "reduced") -> QRResult[ScalarT]: ... +@overload # ~inexact32, r +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["r"]) -> NDArray[ScalarT]: ... +@overload # ~inexact32, raw +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["raw"]) -> _tuple2[NDArray[ScalarT]]: ... +@overload # +float64, reduced|complete +def qr(a: _ToArrayF64, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.float64]: ... +@overload # +float64, r +def qr(a: _ToArrayF64, mode: L["r"]) -> NDArray[np.float64]: ... +@overload # +float64, raw +def qr(a: _ToArrayF64, mode: L["raw"]) -> _tuple2[NDArray[np.float64]]: ... +@overload # ~complex128, reduced|complete +def qr(a: _AsArrayC128, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.complex128]: ... +@overload # ~complex128, r +def qr(a: _AsArrayC128, mode: L["r"]) -> NDArray[np.complex128]: ... +@overload # ~complex128, raw +def qr(a: _AsArrayC128, mode: L["raw"]) -> _tuple2[NDArray[np.complex128]]: ... +@overload # fallback, reduced|complete +def qr(a: _ArrayLikeComplex_co, mode: L["reduced", "complete"] = "reduced") -> QRResult: ... +@overload # fallback, r +def qr(a: _ArrayLikeComplex_co, mode: L["r"]) -> np.ndarray: ... +@overload # fallback, raw +def qr(a: _ArrayLikeComplex_co, mode: L["raw"]) -> _tuple2[np.ndarray]: ... + +# +@overload # workaround for microsoft/pyright#10232, compute_uv=True (default) +def svd(a: NDArray[Never], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False) -> SVDResult: ... +@overload # workaround for microsoft/pyright#10232, compute_uv=False (positional) +def svd(a: NDArray[Never], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # workaround for microsoft/pyright#10232, compute_uv=False (keyword) +def svd(a: NDArray[Never], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # ~inexact32, compute_uv=True (default) +def svd[ScalarT: _inexact32]( + a: _ArrayLike[ScalarT], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float32, ScalarT]: ... +@overload # ~inexact32, compute_uv=False (positional) +def svd(a: _ArrayLike[_inexact32], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float32]: ... +@overload # ~inexact32, compute_uv=False (keyword) def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., -) -> SVDResult: ... -@overload + a: _ArrayLike[_inexact32], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False +) -> NDArray[np.float32]: ... +@overload # +float64, compute_uv=True (default) def svd( - a: _ArrayLikeFloat_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., -) -> SVDResult: ... -@overload + a: _ToArrayF64, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.float64]: ... 
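The per-mode `qr` overloads above capture three distinct return shapes: a `QRResult` namedtuple for 'reduced' and 'complete', a bare array for 'r', and a plain 2-tuple for 'raw'. A short sketch:

>>> import numpy as np
>>> a = np.arange(6.0).reshape(3, 2)
>>> np.linalg.qr(a).Q.shape               # QRResult exposes .Q and .R
(3, 2)
>>> np.linalg.qr(a, mode='r').shape       # only R comes back
(2, 2)
>>> h, tau = np.linalg.qr(a, mode='raw')  # plain tuple, no attribute access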
+@overload # ~complex128, compute_uv=True (default) def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., -) -> SVDResult: ... -@overload + a: _AsArrayC128, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.complex128]: ... +@overload # +float64 | ~complex128, compute_uv=False (positional) +def svd(a: _ToArrayC128, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... +@overload # +float64 | ~complex128, compute_uv=False (keyword) +def svd(a: _ToArrayC128, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... +@overload # fallback, compute_uv=True (default) def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = ..., - compute_uv: L[False] = ..., - hermitian: bool = ..., -) -> NDArray[float64]: ... -@overload -def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = ..., - compute_uv: L[False] = ..., - hermitian: bool = ..., -) -> NDArray[floating]: ... - -def svdvals( - x: _ArrayLikeInt_co | _ArrayLikeFloat_co | _ArrayLikeComplex_co -) -> NDArray[floating]: ... - -# TODO: Returns a scalar for 2D arrays and -# a `(x.ndim - 2)`` dimensionl array otherwise -def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = ...) -> Any: ... - -# TODO: Returns `int` for <2D arrays and `intp` otherwise + a: _ArrayLikeComplex_co, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult: ... +@overload # fallback, compute_uv=False (positional) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # fallback, compute_uv=False (keyword) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... + +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eigvals(a: NDArray[np.inexact[Never]]) -> np.ndarray: ... +@overload # ~complex128 +def eigvals(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # +float64 +def eigvals(a: _ToArrayF64) -> NDArray[np.complex128] | NDArray[np.float64]: ... +@overload # ~complex64 +def eigvals(a: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # ~float32 +def eigvals(a: _ArrayLike[np.float32]) -> NDArray[np.complex64] | NDArray[np.float32]: ... +@overload # fallback +def eigvals(a: _ArrayLikeComplex_co) -> np.ndarray: ... + +# keep in sync with svdvals +@overload # abstract `inexact` (excluding concrete types) +def eigvalsh(a: NDArray[np.inexact[Never]], UPLO: _SideKind = "L") -> NDArray[np.floating]: ... +@overload # ~inexact32 +def eigvalsh(a: _ArrayLike[_inexact32], UPLO: _SideKind = "L") -> NDArray[np.float32]: ... +@overload # +complex128 +def eigvalsh(a: _ToArrayC128, UPLO: _SideKind = "L") -> NDArray[np.float64]: ... +@overload # fallback +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> NDArray[np.floating]: ... + +# keep in sync with eigvalsh +@overload # abstract `inexact` (excluding concrete types) +def svdvals(a: NDArray[np.inexact[Never]], /) -> NDArray[np.floating]: ... +@overload # ~inexact32 +def svdvals(a: _ArrayLike[_inexact32], /) -> NDArray[np.float32]: ... +@overload # +complex128 +def svdvals(a: _ToArrayC128, /) -> NDArray[np.float64]: ... 
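`compute_uv` toggles `svd` between the `SVDResult` namedtuple and a bare array of singular values, which is why each case above needs both a positional and a keyword overload:

>>> import numpy as np
>>> a = np.eye(4, 3)
>>> res = np.linalg.svd(a)                  # SVDResult(U, S, Vh)
>>> s = np.linalg.svd(a, compute_uv=False)  # singular values only
>>> bool(np.allclose(res.S, s))
True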
+@overload # fallback +def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... + +# +@overload # workaround for microsoft/pyright#10232 +def matrix_rank( + A: np.ndarray[tuple[Never, ...], np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> Any: ... +@overload # <2d +def matrix_rank( + A: complex | Sequence[complex] | np.ndarray[_AtMost1D, np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> L[0, 1]: ... +@overload # =2d +def matrix_rank( + A: Sequence[Sequence[complex]] | _Array2D[np.number], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> np.int_: ... +@overload # =3d +def matrix_rank( + A: Sequence[Sequence[Sequence[complex]]] | np.ndarray[tuple[int, int, int], np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> np.ndarray[tuple[int], np.dtype[np.int_]]: ... +@overload # ≥4d +def matrix_rank( + A: Sequence[Sequence[Sequence[_NestedSequence[complex]]]] | np.ndarray[_AtLeast4D, np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.int_]: ... +@overload # ?d def matrix_rank( A: _ArrayLikeComplex_co, - tol: _ArrayLikeFloat_co | None = ..., - hermitian: bool = ..., + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, *, - rtol: _ArrayLikeFloat_co | None = ..., + rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... -@overload -def pinv( - a: _ArrayLikeInt_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., -) -> NDArray[float64]: ... -@overload -def pinv( - a: _ArrayLikeFloat_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., -) -> NDArray[floating]: ... -@overload -def pinv( - a: _ArrayLikeComplex_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., -) -> NDArray[complexfloating]: ... - -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +# +@overload # workaround for microsoft/pyright#10232 +def cond(x: np.ndarray[_JustAnyShape, np.dtype[np.number]], p: _OrderKind | None = None) -> Any: ... +@overload # 2d ~inexact32 +def cond(x: _Array2D[_inexact32], p: _OrderKind | None = None) -> np.float32: ... +@overload # 2d +inexact64 +def cond(x: _ToArrayC128_2d, p: _OrderKind | None = None) -> np.float64: ... +@overload # 2d ~number +def cond(x: _Array2D[np.number], p: _OrderKind | None = None) -> np.floating: ... +@overload # >2d ~inexact32 +def cond(x: np.ndarray[_AtLeast3D, np.dtype[_inexact32]], p: _OrderKind | None = None) -> NDArray[np.float32]: ... +@overload # >2d +inexact64 +def cond(x: _ToArrayC128_3nd, p: _OrderKind | None = None) -> NDArray[np.float64]: ... +@overload # >2d ~number +def cond(x: np.ndarray[_AtLeast3D, np.dtype[np.number]], p: _OrderKind | None = None) -> NDArray[np.floating]: ... +@overload # fallback +def cond(x: _ArrayLikeComplex_co, p: _OrderKind | None = None) -> Any: ... + +# keep in sync with `det` +@overload # workaround for microsoft/pyright#10232 +def slogdet(a: np.ndarray[_JustAnyShape, np.dtype[np.number]]) -> SlogdetResult: ... +@overload # 2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _Array2D[ScalarT]) -> SlogdetResult[np.float32, ScalarT]: ...
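The dimensionality ladder in the `matrix_rank` overloads above matches runtime behavior: input below 2-d can only have rank 0 or 1, a single matrix yields a scalar, and a stack yields one rank per matrix. For instance:

>>> import numpy as np
>>> int(np.linalg.matrix_rank(3.0))        # <2d: rank is 0 or 1
1
>>> int(np.linalg.matrix_rank(np.eye(4)))  # single matrix: scalar rank
4
>>> np.linalg.matrix_rank(np.stack([np.eye(3), np.zeros((3, 3))]))
array([3, 0])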
+@overload # >2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _Array3ND[ScalarT]) -> SlogdetResult[NDArray[np.float32], NDArray[ScalarT]]: ... +@overload # 2d +float64 +def slogdet(a: _Array2D[_to_float64]) -> SlogdetResult[np.float64, np.float64]: ... +@overload # >2d +float64 +def slogdet(a: _Array3ND[_to_float64]) -> SlogdetResult[NDArray[np.float64], NDArray[np.float64]]: ... +@overload # 2d ~complex128 +def slogdet(a: _Array2D[np.complex128] | Sequence[list[complex]]) -> SlogdetResult[np.float64, np.complex128]: ... +@overload # >2d ~complex128 +def slogdet( + a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]] +) -> SlogdetResult[NDArray[np.float64], NDArray[np.complex128]]: ... +@overload # fallback def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +# keep in sync with `slogdet` +@overload # workaround for microsoft/pyright#10232 +def det(a: np.ndarray[_JustAnyShape, np.dtype[np.number]]) -> Any: ... +@overload # 2d ~inexact32 +def det[ScalarT: _inexact32](a: _Array2D[ScalarT]) -> ScalarT: ... +@overload # >2d ~inexact32 +def det[ScalarT: _inexact32](a: _Array3ND[ScalarT]) -> NDArray[ScalarT]: ... +@overload # 2d +float64 +def det(a: _Array2D[_to_float64]) -> np.float64: ... +@overload # >2d +float64 +def det(a: _Array3ND[_to_float64]) -> NDArray[np.float64]: ... +@overload # 2d ~complex128 +def det(a: _Array2D[np.complex128] | Sequence[list[complex]]) -> np.complex128: ... +@overload # >2d ~complex128 +def det(a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]]) -> NDArray[np.complex128]: ... +@overload # fallback def det(a: _ArrayLikeComplex_co) -> Any: ... -@overload -def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = ...) -> tuple[ - NDArray[float64], - NDArray[float64], - int32, - NDArray[float64], -]: ... -@overload -def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = ...) -> tuple[ - NDArray[floating], - NDArray[floating], - int32, - NDArray[floating], -]: ... -@overload -def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = ...) -> tuple[ - NDArray[complexfloating], - NDArray[floating], - int32, - NDArray[floating], -]: ... +# +@overload # +float64, ~float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[_to_float64]] | Sequence[Sequence[float]], + b: _SupportsArray[ShapeT, np.dtype[np.floating | np.integer]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... +@overload # ~float64, +float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.floating | np.integer]] | Sequence[Sequence[float]], + b: _SupportsArray[ShapeT, np.dtype[_to_float64]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... +@overload # +complex128, ~complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.number]] | Sequence[Sequence[complex]], + b: _SupportsArray[ShapeT, np.dtype[np.complex128]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... 
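`slogdet` and `det` deliberately share one dtype ladder (hence the "keep in sync" comments), and the identity ``sign * exp(logabsdet) == det`` ties the namedtuple fields together. A real-valued sketch:

>>> import numpy as np
>>> a = 2.0 * np.eye(3)
>>> res = np.linalg.slogdet(a)
>>> float(res.sign)
1.0
>>> bool(np.isclose(res.sign * np.exp(res.logabsdet), np.linalg.det(a)))
True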
+@overload # ~complex128, +complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.complex128]] | Sequence[list[complex]], + b: _SupportsArray[ShapeT, np.dtype[np.number]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... +@overload # ~float32, ~float32, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.float32]], + b: _SupportsArray[ShapeT, np.dtype[np.float32]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float32, np.float32]: ... +@overload # +complex64, ~complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.complex64 | np.float32]], + b: _SupportsArray[ShapeT, np.dtype[np.complex64]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... +@overload # ~complex64, +complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.complex64]], + b: _SupportsArray[ShapeT, np.dtype[np.complex64 | np.float32]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... +@overload # +float64, +float64, unknown shape +def lstsq( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + rcond: float | None = None, +) -> _LstSqResult[_AnyShape, np.float64 | Any, np.float64 | Any]: ... +@overload # +complex128, +complex128, unknown shape +def lstsq( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + rcond: float | None = None, +) -> _LstSqResult[_AnyShape, np.complex128 | Any, np.float64 | Any]: ... +# TODO: narrow return types @overload def norm( x: ArrayLike, - ord: float | L["fro", "nuc"] | None = ..., - axis: None = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = None, + axis: None = None, + keepdims: L[False] = False, ) -> floating: ... @overload def norm( x: ArrayLike, - ord: float | L["fro", "nuc"] | None = ..., - axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, +) -> Any: ... +@overload +def norm( + x: ArrayLike, + ord: float | L["fro", "nuc"] | None = None, + *, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, ) -> Any: ... +# TODO: narrow return types @overload def matrix_norm( x: ArrayLike, /, *, - ord: float | L["fro", "nuc"] | None = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: L[False] = False, ) -> floating: ... @overload def matrix_norm( x: ArrayLike, /, *, - ord: float | L["fro", "nuc"] | None = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: bool = False, ) -> Any: ... +# TODO: narrow return types @overload def vector_norm( x: ArrayLike, /, *, - axis: None = ..., - ord: float | None = ..., - keepdims: bool = ..., + axis: None = None, + ord: float | None = 2, + keepdims: L[False] = False, ) -> floating: ... @overload def vector_norm( x: ArrayLike, /, *, - axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., - ord: float | None = ..., - keepdims: bool = ..., + axis: SupportsInt | SupportsIndex | tuple[int, ...], + ord: float | None = 2, + keepdims: bool = False, ) -> Any: ... 
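`_LstSqResult` above pins down the 4-tuple that `lstsq` returns (solution, residual sums, `int32` rank, singular values), with the solution's shape tied to `b`. For example:

>>> import numpy as np
>>> a = np.array([[1., 1.], [1., 2.], [1., 3.]])
>>> b = np.array([1., 2., 2.9])
>>> x, residuals, rank, sv = np.linalg.lstsq(a, b, rcond=None)
>>> x.shape, residuals.shape, int(rank), sv.shape
((2,), (1,), 2, (2,))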
+# keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) +@overload +def tensordot[ScalarT: np.number | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + b: _ArrayLike[ScalarT], + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[ScalarT]: ... +@overload +def tensordot( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.bool_]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.int_ | Any]: ... +@overload +def tensordot( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.float64 | Any]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.complex128 | Any]: ... + # TODO: Returns a scalar or array def multi_dot( arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], *, - out: NDArray[Any] | None = ..., + out: NDArray[Any] | None = None, ) -> Any: ... -def diagonal( - x: ArrayLike, # >= 2D array +# +@overload # workaround for microsoft/pyright#10232 +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[_JustAnyShape, DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[_AnyShape, DTypeT]: ... +@overload # 2d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int], DTypeT]: ... +@overload # 3d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int], DTypeT]: ... +@overload # 4d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int, int], DTypeT]: ... +@overload # nd like ~bool +def diagonal(x: _NestedSequence[list[bool]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bool]: ... +@overload # nd like ~int +def diagonal(x: _NestedSequence[list[int]], /, *, offset: SupportsIndex = 0) -> NDArray[np.int_]: ... +@overload # nd like ~float +def diagonal(x: _NestedSequence[list[float]], /, *, offset: SupportsIndex = 0) -> NDArray[np.float64]: ... +@overload # nd like ~complex +def diagonal(x: _NestedSequence[list[complex]], /, *, offset: SupportsIndex = 0) -> NDArray[np.complex128]: ... +@overload # nd like ~bytes +def diagonal(x: _NestedSequence[list[bytes]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bytes_]: ... +@overload # nd like ~str +def diagonal(x: _NestedSequence[list[str]], /, *, offset: SupportsIndex = 0) -> NDArray[np.str_]: ... +@overload # fallback +def diagonal(x: ArrayLike, /, *, offset: SupportsIndex = 0) -> np.ndarray: ... + +# +@overload # workaround for microsoft/pyright#10232 +def trace( + x: _SupportsArray[_JustAnyShape, np.dtype[_to_complex]], /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None +) -> Any: ... +@overload # 2d known dtype, dtype=None +def trace[ScalarT: _to_complex]( + x: _SupportsArray[tuple[int, int], np.dtype[ScalarT]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> ScalarT: ... +@overload # 2d, dtype= +def trace[ScalarT: _to_complex]( + x: _SupportsArray[tuple[int, int], np.dtype[_to_complex]] | Sequence[Sequence[_ComplexLike_co]], /, *, - offset: SupportsIndex = ..., -) -> NDArray[Any]: ... 
- -def trace( - x: ArrayLike, # >= 2D array + offset: SupportsIndex = 0, + dtype: _DTypeLike[ScalarT], +) -> ScalarT: ... +@overload # 2d bool +def trace(x: Sequence[Sequence[bool]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.bool: ... +@overload # 2d int +def trace(x: Sequence[list[int]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.int_: ... +@overload # 2d float +def trace(x: Sequence[list[float]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.float64: ... +@overload # 2d complex +def trace(x: Sequence[list[complex]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.complex128: ... +@overload # 3d known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int], DTypeT]: ... +@overload # 3d, dtype= +def trace[ScalarT: _to_complex]( + x: _SupportsArray[tuple[int, int, int], np.dtype[_to_complex]] | Sequence[Sequence[Sequence[_ComplexLike_co]]], /, *, - offset: SupportsIndex = ..., - dtype: DTypeLike = ..., -) -> Any: ... + offset: SupportsIndex = 0, + dtype: _DTypeLike[ScalarT], +) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +@overload # 3d+ known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[_AtLeast3D, DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int, *tuple[Any, ...]], DTypeT]: ... +@overload # 3d+, dtype= +def trace[ScalarT: _to_complex]( + x: _SupportsArray[_AtLeast3D, np.dtype[_to_complex]] | _NestedSequence[Sequence[Sequence[_ComplexLike_co]]], + /, + *, + offset: SupportsIndex = 0, + dtype: _DTypeLike[ScalarT], +) -> np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[ScalarT]]: ... +@overload # 3d+ bool +def trace( + x: _NestedSequence[Sequence[Sequence[bool]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.bool]: ... +@overload # 3d+ int +def trace( + x: _NestedSequence[Sequence[list[int]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.int_]: ... +@overload # 3d+ float +def trace( + x: _NestedSequence[Sequence[list[float]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.float64]: ... +@overload # 3d+ complex +def trace( + x: _NestedSequence[Sequence[list[complex]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.complex128]: ... +@overload # fallback +def trace(x: _ArrayLikeComplex_co, /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Any: ... +# TODO: narrow return types @overload def cross( x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[unsignedinteger]: ... @overload def cross( @@ -441,7 +818,7 @@ def cross( x2: _ArrayLikeInt_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[signedinteger]: ... @overload def cross( @@ -449,7 +826,7 @@ def cross( x2: _ArrayLikeFloat_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[floating]: ... @overload def cross( @@ -457,26 +834,17 @@ def cross( x2: _ArrayLikeComplex_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[complexfloating]: ... +# TODO: narrow return types @overload -def matmul( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, -) -> NDArray[signedinteger]: ... +def matmul[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def matmul( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, -) -> NDArray[unsignedinteger]: ... 
+def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload -def matmul( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, -) -> NDArray[floating]: ... +def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload -def matmul( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... +def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... +@overload +def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... diff --git a/numpy/linalg/_umath_linalg.pyi b/numpy/linalg/_umath_linalg.pyi index cd07acdb1f9e..f90706a7b159 100644 --- a/numpy/linalg/_umath_linalg.pyi +++ b/numpy/linalg/_umath_linalg.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 diff --git a/numpy/linalg/lapack_lite.pyi b/numpy/linalg/lapack_lite.pyi index 835293a26762..3ec3919bfa3b 100644 --- a/numpy/linalg/lapack_lite.pyi +++ b/numpy/linalg/lapack_lite.pyi @@ -57,6 +57,8 @@ class _ZUNGQR(TypedDict): _ilp64: Final[bool] = ... +class LapackError(Exception): ... + def dgelsd( m: int, n: int, diff --git a/numpy/linalg/lapack_lite/f2c.c b/numpy/linalg/lapack_lite/f2c.c index 47e4d5729b83..9afac89e61d1 100644 --- a/numpy/linalg/lapack_lite/f2c.c +++ b/numpy/linalg/lapack_lite/f2c.c @@ -191,7 +191,7 @@ integer i_dnnt(x) doublereal *x; integer i_dnnt(doublereal *x) #endif { -return( (*x)>=0 ? +return (integer)( (*x)>=0 ? floor(*x + .5) : -floor(.5 - *x) ); } diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c index e5f3af05af22..cad5f3f92f09 100644 --- a/numpy/linalg/lapack_litemodule.c +++ b/numpy/linalg/lapack_litemodule.c @@ -377,28 +377,25 @@ static struct PyMethodDef lapack_lite_module_methods[] = { { NULL,NULL,0, NULL} }; +static int module_loaded = 0; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "lapack_lite", - NULL, - -1, - lapack_lite_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit_lapack_lite(void) +static int +lapack_lite_exec(PyObject *m) { - PyObject *m,*d; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + PyObject *d; + + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; + + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); + d = PyModule_GetDict(m); LapackError = PyErr_NewException("numpy.linalg.lapack_lite.LapackError", NULL, NULL); PyDict_SetItemString(d, "LapackError", LapackError); @@ -409,10 +406,29 @@ PyMODINIT_FUNC PyInit_lapack_lite(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot lapack_lite_slots[] = { + {Py_mod_exec, lapack_lite_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, 
#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "lapack_lite", + .m_size = 0, + .m_methods = lapack_lite_module_methods, + .m_slots = lapack_lite_slots, +}; - return m; +PyMODINIT_FUNC PyInit_lapack_lite(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py deleted file mode 100644 index 81c80d0fd690..000000000000 --- a/numpy/linalg/linalg.py +++ /dev/null @@ -1,17 +0,0 @@ -def __getattr__(attr_name): - import warnings - - from numpy.linalg import _linalg - ret = getattr(_linalg, attr_name, None) - if ret is None: - raise AttributeError( - f"module 'numpy.linalg.linalg' has no attribute {attr_name}") - warnings.warn( - "The numpy.linalg.linalg has been made private and renamed to " - "numpy.linalg._linalg. All public functions exported by it are " - f"available from numpy.linalg. Please use numpy.linalg.{attr_name} " - "instead.", - DeprecationWarning, - stacklevel=3 - ) - return ret diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi deleted file mode 100644 index dbe9becfb8d5..000000000000 --- a/numpy/linalg/linalg.pyi +++ /dev/null @@ -1,69 +0,0 @@ -from ._linalg import ( - LinAlgError, - cholesky, - cond, - cross, - det, - diagonal, - eig, - eigh, - eigvals, - eigvalsh, - inv, - lstsq, - matmul, - matrix_norm, - matrix_power, - matrix_rank, - matrix_transpose, - multi_dot, - norm, - outer, - pinv, - qr, - slogdet, - solve, - svd, - svdvals, - tensordot, - tensorinv, - tensorsolve, - trace, - vecdot, - vector_norm, -) - -__all__ = [ - "LinAlgError", - "cholesky", - "cond", - "cross", - "det", - "diagonal", - "eig", - "eigh", - "eigvals", - "eigvalsh", - "inv", - "lstsq", - "matmul", - "matrix_norm", - "matrix_power", - "matrix_rank", - "matrix_transpose", - "multi_dot", - "norm", - "outer", - "pinv", - "qr", - "slogdet", - "solve", - "svd", - "svdvals", - "tensordot", - "tensorinv", - "tensorsolve", - "trace", - "vecdot", - "vector_norm", -] diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index e2f8136208d6..1d3297286317 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ -47,8 +47,6 @@ py.install_sources( '_linalg.pyi', '_umath_linalg.pyi', 'lapack_lite.pyi', - 'linalg.py', - 'linalg.pyi', ], subdir: 'numpy/linalg' ) diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py index cd4c10832e7e..7fb5008f1ff8 100644 --- a/numpy/linalg/tests/test_deprecations.py +++ b/numpy/linalg/tests/test_deprecations.py @@ -1,8 +1,9 @@ """Test deprecation and future warnings. 
""" +import pytest + import numpy as np -from numpy.testing import assert_warns def test_qr_mode_full_future_warning(): @@ -14,7 +15,7 @@ def test_qr_mode_full_future_warning(): """ a = np.eye(2) - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='full') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='f') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index cbf7dd63be5e..b3744024fd88 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -8,6 +8,7 @@ import textwrap import threading import traceback +import warnings import pytest @@ -42,7 +43,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) try: @@ -793,15 +793,28 @@ def do(self, a, b, tags): class TestCond(CondCases): - def test_basic_nonsvd(self): + @pytest.mark.parametrize('is_complex', [False, True]) + def test_basic_nonsvd(self, is_complex): # Smoketest the non-svd norms A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + if is_complex: + # Since A is linearly scaled, the condition number should not change + A = A * (1 + 1j) assert_almost_equal(linalg.cond(A, inf), 4) assert_almost_equal(linalg.cond(A, -inf), 2 / 3) assert_almost_equal(linalg.cond(A, 1), 4) assert_almost_equal(linalg.cond(A, -1), 0.5) assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + @pytest.mark.parametrize('norm_ord', [1, -1, 2, -2, 'fro', np.inf, -np.inf]) + def test_cond_dtypes(self, dtype, norm_ord): + # Check that the condition number is computed in the same dtype + # as the input matrix + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]], dtype=dtype) + out_type = get_real_dtype(dtype) + assert_equal(linalg.cond(A, p=norm_ord).dtype, out_type) + def test_singular(self): # Singular matrices have infinite condition number for # positive norms, and negative norms shouldn't raise @@ -1318,8 +1331,9 @@ def test_vector_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 0.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 0.0) @@ -1481,8 +1495,9 @@ def test_matrix_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 2.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 1.0) @@ -2203,8 +2218,7 @@ def test_non_square_handling(self, arr, ind): ((24, 8, 3), 1), ]) def test_tensorinv_shape(self, shape, ind): - a = np.eye(24) - a.shape = shape + a = np.eye(24).reshape(shape) ainv = linalg.tensorinv(a=a, ind=ind) expected = a.shape[ind:] + a.shape[:ind] actual = ainv.shape @@ -2214,15 +2228,13 @@ def test_tensorinv_shape(self, shape, ind): 0, -2, ]) def test_tensorinv_ind_limit(self, ind): - a = np.eye(24) - 
a.shape = (4, 6, 8, 3) + a = np.eye(24).reshape((4, 6, 8, 3)) with assert_raises(ValueError): linalg.tensorinv(a=a, ind=ind) def test_tensorinv_result(self): # mimic a docstring example - a = np.eye(24) - a.shape = (24, 8, 3) + a = np.eye(24).reshape((24, 8, 3)) ainv = linalg.tensorinv(a, ind=1) b = np.ones(24) assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index c46f83adb0af..053e7130da63 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -33,7 +33,7 @@ def test_eig_build(self): 1.51971555e-15 + 0.j, -1.51308713e-15 + 0.j]) a = arange(13 * 13, dtype=float64) - a.shape = (13, 13) + a = a.reshape((13, 13)) a = a % 17 va, ve = linalg.eig(a) va.sort() @@ -165,6 +165,7 @@ def test_matrix_rank_rtol_argument(self, rtol): res = np.linalg.matrix_rank(x, rtol=rtol) assert res.shape == (4,) + @pytest.mark.thread_unsafe(reason="test is already testing threads with openblas") def test_openblas_threading(self): # gh-27036 # Test whether matrix multiplication involving a large matrix always diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index ead6d84a73a2..4845b1261aca 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -448,6 +448,15 @@ set_fp_invalid_or_clear(int error_occurred) } } +static inline void +report_no_memory() +{ + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API; + PyErr_NoMemory(); + NPY_DISABLE_C_API; +} + /* ***************************************************************************** ** Some handy constants ** @@ -1199,10 +1208,7 @@ slogdet(char **args, } else { /* TODO: Requires use of new ufunc API to indicate error return */ - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); } } @@ -1255,10 +1261,7 @@ det(char **args, } else { /* TODO: Requires use of new ufunc API to indicate error return */ - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); } } @@ -1331,7 +1334,7 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, mem_buff = (npy_uint8 *)malloc(alloc_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; w = mem_buff + safe_N * safe_N * sizeof(typ); @@ -1365,7 +1368,7 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, mem_buff2 = (npy_uint8 *)malloc(lwork*sizeof(typ) + liwork*sizeof(fortran_int)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -1378,6 +1381,9 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, return 1; + no_memory: + report_no_memory(); + error: /* something failed */ memset(params, 0, sizeof(*params)); @@ -1440,7 +1446,7 @@ using fbasetyp = fortran_type_t; mem_buff = (npy_uint8 *)malloc(safe_N * safe_N * sizeof(typ) + safe_N * sizeof(basetyp)); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; w = mem_buff + safe_N * safe_N * sizeof(typ); @@ -1478,7 +1484,7 @@ using fbasetyp = fortran_type_t; lrwork*sizeof(basetyp) + liwork*sizeof(fortran_int)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -1495,6 +1501,8 @@ using fbasetyp = fortran_type_t; return 1; /* something failed */ +no_memory: + report_no_memory(); error: memset(params, 0, sizeof(*params)); free(mem_buff2); @@ -1733,7 +1741,10 @@ init_gesv(GESV_PARAMS_t *params, fortran_int N, fortran_int NRHS) params->LDB = ld; return 1; + error: + report_no_memory(); + free(mem_buff); memset(params, 0, 
sizeof(*params)); @@ -1977,6 +1988,8 @@ init_potrf(POTR_PARAMS_t *params, char UPLO, fortran_int N) return 1; error: + report_no_memory(); + free(mem_buff); memset(params, 0, sizeof(*params)); @@ -2175,7 +2188,7 @@ scalar_trait) vlr_size + vrr_size + w_size + vl_size + vr_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2218,7 +2231,7 @@ scalar_trait) mem_buff2 = (npy_uint8 *)malloc(work_count*sizeof(typ)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2226,6 +2239,10 @@ scalar_trait) params->WORK = (typ*)work; return 1; + + no_memory: + report_no_memory(); + error: free(mem_buff2); free(mem_buff); @@ -2392,7 +2409,7 @@ using realtyp = basetype_t; mem_buff = (npy_uint8 *)malloc(total_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2434,7 +2451,7 @@ using realtyp = basetype_t; mem_buff2 = (npy_uint8 *)malloc(work_count*sizeof(ftyp)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2443,6 +2460,9 @@ using realtyp = basetype_t; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); error: free(mem_buff2); free(mem_buff); @@ -2754,7 +2774,7 @@ init_gesdd(GESDD_PARAMS_t *params, mem_buff = (npy_uint8 *)malloc(a_size + s_size + u_size + vt_size + iwork_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2798,7 +2818,7 @@ init_gesdd(GESDD_PARAMS_t *params, mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2807,6 +2827,9 @@ init_gesdd(GESDD_PARAMS_t *params, params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -2894,7 +2917,7 @@ using frealtyp = basetype_t; rwork_size + iwork_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2939,7 +2962,7 @@ using frealtyp = basetype_t; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2948,6 +2971,10 @@ using frealtyp = basetype_t; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff2); @@ -3186,7 +3213,7 @@ using ftyp = fortran_doublereal; mem_buff = (npy_uint8 *)malloc(a_size + tau_size); if (!mem_buff) - goto error; + goto no_memory; a = mem_buff; tau = a + a_size; @@ -3219,13 +3246,17 @@ using ftyp = fortran_doublereal; work_size = (size_t) params->LWORK * sizeof(ftyp); mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3260,7 +3291,7 @@ using ftyp = fortran_doublecomplex; mem_buff = (npy_uint8 *)malloc(a_size + tau_size); if (!mem_buff) - goto error; + goto no_memory; a = mem_buff; tau = a + a_size; @@ -3295,13 +3326,17 @@ using ftyp = fortran_doublecomplex; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3433,7 +3468,7 @@ using ftyp = fortran_doublereal; mem_buff = (npy_uint8 *)malloc(q_size + tau_size + a_size); if (!mem_buff) - goto error; + goto no_memory; q = mem_buff; tau = q + q_size; @@ -3468,13 +3503,17 @@ using ftyp = fortran_doublereal; mem_buff2 
= (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3512,7 +3551,7 @@ using ftyp=fortran_doublecomplex; mem_buff = (npy_uint8 *)malloc(q_size + tau_size + a_size); if (!mem_buff) - goto error; + goto no_memory; q = mem_buff; tau = q + q_size; @@ -3548,7 +3587,7 @@ using ftyp=fortran_doublecomplex; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; @@ -3556,6 +3595,10 @@ using ftyp=fortran_doublecomplex; params->LWORK = work_count; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3898,10 +3941,7 @@ scalar_trait) return 1; no_memory: - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); @@ -4034,10 +4074,7 @@ using frealtyp = basetype_t; return 1; no_memory: - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); @@ -4688,57 +4725,54 @@ static PyMethodDef UMath_LinAlgMethods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - UMATH_LINALG_MODULE_NAME, - NULL, - -1, - UMath_LinAlgMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__umath_linalg(void) +static int +_umath_linalg_exec(PyObject *m) { - PyObject *m; PyObject *d; PyObject *version; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } d = PyModule_GetDict(m); if (d == NULL) { - return NULL; + return -1; } version = PyUnicode_FromString(umath_linalg_version_string); if (version == NULL) { - return NULL; + return -1; } int ret = PyDict_SetItemString(d, "__version__", version); Py_DECREF(version); if (ret < 0) { - return NULL; + return -1; } /* Load the ufunc operators into the module's namespace */ if (addUfuncs(d) < 0) { - return NULL; + return -1; } #if PY_VERSION_HEX < 0x30d00b3 && !HAVE_EXTERNAL_LAPACK lapack_lite_lock = PyThread_allocate_lock(); if (lapack_lite_lock == NULL) { PyErr_NoMemory(); - return NULL; + return -1; } #endif @@ -4748,10 +4782,30 @@ PyMODINIT_FUNC PyInit__umath_linalg(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _umath_linalg_slots[] = { + {Py_mod_exec, (void*)_umath_linalg_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + 
"_umath_linalg", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + UMath_LinAlgMethods, /* m_methods */ + _umath_linalg_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__umath_linalg(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 05ea373a6a12..f6d7b0a8c8e5 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -35,6 +35,7 @@ amax, amin, angle, + array as narray, # noqa: F401 bool_, expand_dims, finfo, # noqa: F401 @@ -42,11 +43,9 @@ iscomplexobj, ndarray, ) -from numpy import array as narray # noqa: F401 from numpy._core import multiarray as mu from numpy._core.numeric import normalize_axis_tuple from numpy._utils import set_module -from numpy._utils._inspect import formatargspec, getargspec __all__ = [ 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', @@ -134,18 +133,6 @@ def doc_note(initialdoc, note): return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) -def get_object_signature(obj): - """ - Get the signature from obj - - """ - try: - sig = formatargspec(*getargspec(obj)) - except TypeError: - sig = '' - return sig - - ############################################################################### # Exceptions # ############################################################################### @@ -181,7 +168,8 @@ class MaskError(MAError): 'S': b'N/A', 'u': 999999, 'V': b'???', - 'U': 'N/A' + 'U': 'N/A', + 'T': 'N/A' } # Add datetime64 and timedelta64 types @@ -264,16 +252,17 @@ def default_fill_value(obj): The default filling value depends on the datatype of the input array or the type of the input scalar: - ======== ======== - datatype default - ======== ======== - bool True - int 999999 - float 1.e20 - complex 1.e20+0j - object '?' - string 'N/A' - ======== ======== + =========== ======== + datatype default + =========== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + StringDType 'N/A' + =========== ======== For structured types, a structured scalar is returned, with each field the default fill value for its type. 
@@ -498,7 +487,7 @@ def _check_fill_value(fill_value, ndtype): fill_value = np.asarray(fill_value, dtype=object) fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype) - elif isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + elif isinstance(fill_value, str) and (ndtype.char not in 'OSTVU'): # Note this check doesn't work if fill_value is not a scalar err_msg = "Cannot set fill value of string with array of dtype %s" raise TypeError(err_msg % ndtype) @@ -2288,7 +2277,7 @@ def masked_object(x, value, copy=True, shrink=True): -------- >>> import numpy as np >>> import numpy.ma as ma - >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> food = np.array(['green_eggs', 'ham'], dtype=np.object_) >>> # don't eat spoiled food >>> eat = ma.masked_object(food, 'green_eggs') >>> eat @@ -2297,7 +2286,7 @@ def masked_object(x, value, copy=True, shrink=True): fill_value='green_eggs', dtype=object) >>> # plain ol` ham is boring - >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=np.object_) >>> eat = ma.masked_object(fresh_food, 'green_eggs') >>> eat masked_array(data=['cheese', 'ham', 'pineapple'], @@ -2414,7 +2403,7 @@ def masked_invalid(a, copy=True): -------- >>> import numpy as np >>> import numpy.ma as ma - >>> a = np.arange(5, dtype=float) + >>> a = np.arange(5, dtype=np.float64) >>> a[2] = np.nan >>> a[3] = np.inf >>> a @@ -4818,7 +4807,6 @@ def reshape(self, *s, **kwargs): fill_value=999999) """ - kwargs.update(order=kwargs.get('order', 'C')) result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask @@ -5454,8 +5442,8 @@ def anom(self, axis=None, dtype=None): The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type - the default is float32; for arrays of float types it is the same as - the array type. + the default is float32; for arrays of float types it is the same as + the array type. See Also -------- @@ -5628,7 +5616,7 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, is used. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. - order : list, optional + order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. @@ -5812,11 +5800,6 @@ def sort(self, axis=-1, kind=None, order=None, endwith=True, stable : bool, optional Only for compatibility with ``np.sort``. Ignored. - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - See Also -------- numpy.ndarray.sort : Method to sort an array in-place. @@ -6656,14 +6639,14 @@ def filled(self, fill_value=None): def tolist(self): """ - Transforms the mvoid object into a tuple. + Transforms the mvoid object into a tuple. - Masked fields are replaced by None. + Masked fields are replaced by None. - Returns - ------- - returned_tuple - Tuple of fields + Returns + ------- + returned_tuple + Tuple of fields """ _mask = self._mask if _mask is nomask: @@ -7051,7 +7034,7 @@ def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): ############################################################################## -class _frommethod: +def _frommethod(methodname: str, reversed: bool = False): """ Define functions from existing MaskedArray methods. 
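[Aside, not part of the patch] The hunk above converts `_frommethod` from a class into a factory function, and the next hunk rewrites its body to build a plain wrapper carrying the method's signature and docstring. A minimal sketch of the observable behaviour, using `np.ma.sum`, which is built via `_frommethod('sum')`:

>>> import numpy as np
>>> np.ma.sum.__name__            # metadata copied from MaskedArray.sum
'sum'
>>> print(np.ma.sum(np.ma.array([1, 2, 3], mask=[0, 1, 0])))
4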
@@ -7059,44 +7042,47 @@ class _frommethod:
     ----------
     methodname : str
         Name of the method to transform.
-
+    reversed : bool, optional
+        Whether to reverse the first two arguments of the method. Default is False.
     """
+    method = getattr(MaskedArray, methodname)
+    assert callable(method)
 
-    def __init__(self, methodname, reversed=False):
-        self.__name__ = methodname
-        self.__qualname__ = methodname
-        self.__doc__ = self.getdoc()
-        self.reversed = reversed
+    signature = inspect.signature(method)
+    params = list(signature.parameters.values())
+    params[0] = params[0].replace(name="a")  # rename 'self' to 'a'
 
-    def getdoc(self):
-        "Return the doc of the function (from the doc of the method)."
-        meth = getattr(MaskedArray, self.__name__, None) or\
-            getattr(np, self.__name__, None)
-        signature = self.__name__ + get_object_signature(meth)
-        if meth is not None:
-            doc = f"""    {signature}
-{getattr(meth, '__doc__', None)}"""
-            return doc
+    if reversed:
+        assert len(params) >= 2
+        params[0], params[1] = params[1], params[0]
+
+        def wrapper(a, b, *args, **params):
+            return getattr(asanyarray(b), methodname)(a, *args, **params)
+
+    else:
+        def wrapper(a, *args, **params):
+            return getattr(asanyarray(a), methodname)(*args, **params)
 
-    def __call__(self, a, *args, **params):
-        if self.reversed:
-            args = list(args)
-            a, args[0] = args[0], a
+    wrapper.__signature__ = signature.replace(parameters=params)
+    wrapper.__name__ = wrapper.__qualname__ = methodname
 
-        marr = asanyarray(a)
-        method_name = self.__name__
-        method = getattr(type(marr), method_name, None)
-        if method is None:
-            # use the corresponding np function
-            method = getattr(np, method_name)
+    # __doc__ is None when using `python -OO ...`
+    if method.__doc__ is not None:
+        str_signature = f"{methodname}{signature}"
+        # TODO: For methods with a docstring "Parameters" section that do not already
+        # mention `a` (see e.g. `MaskedArray.var.__doc__`), it should be inserted there.
+        wrapper.__doc__ = f"    {str_signature}\n{method.__doc__}"
 
-        return method(marr, *args, **params)
+    return wrapper
 
 all = _frommethod('all')
 anomalies = anom = _frommethod('anom')
 any = _frommethod('any')
+argmax = _frommethod('argmax')
+argmin = _frommethod('argmin')
 compress = _frommethod('compress', reversed=True)
+count = _frommethod('count')
 cumprod = _frommethod('cumprod')
 cumsum = _frommethod('cumsum')
 copy = _frommethod('copy')
@@ -7120,7 +7106,6 @@ def __call__(self, a, *args, **params):
 trace = _frommethod('trace')
 var = _frommethod('var')
 
-count = _frommethod('count')
 
 def take(a, indices, axis=None, out=None, mode='raise'):
     """
@@ -7209,9 +7194,6 @@ def power(a, b, third=None):
     return result
 
-argmin = _frommethod('argmin')
-argmax = _frommethod('argmax')
-
 def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True,
             fill_value=None, *, stable=None):
     "Function version of the eponymous method."
@@ -8620,7 +8602,7 @@ def asarray(a, dtype=None, order=None):
                         subok=False, order=order)
 
 
-def asanyarray(a, dtype=None):
+def asanyarray(a, dtype=None, order=None):
     """
     Convert the input to a masked array, conserving subclasses.
 
@@ -8633,9 +8615,13 @@ def asanyarray(a, dtype=None):
         Input data, in any form that can be converted to an array.
     dtype : dtype, optional
         By default, the data-type is inferred from the input data.
-    order : {'C', 'F'}, optional
-        Whether to use row-major ('C') or column-major ('FORTRAN') memory
-        representation.  Default is 'C'.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Memory layout. 'A' and 'K' depend on the order of input array ``a``.
+ 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if ``a`` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'K'. Returns ------- @@ -8665,9 +8651,18 @@ def asanyarray(a, dtype=None): """ # workaround for #8666, to preserve identity. Ideally the bottom line # would handle this for us. - if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype): + if ( + isinstance(a, MaskedArray) + and (dtype is None or dtype == a.dtype) + and ( + order in {None, 'A', 'K'} + or order == 'C' and a.flags.carray + or order == 'F' and a.flags.f_contiguous + ) + ): return a - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True, + order=order) ############################################################################## @@ -8745,78 +8740,76 @@ def fromflex(fxarray): return masked_array(fxarray['_data'], mask=fxarray['_mask']) -class _convert2ma: +def _convert2ma(funcname: str, np_ret: str, np_ma_ret: str, + params: dict[str, str] | None = None): + """Convert function from numpy to numpy.ma.""" + func = getattr(np, funcname) + params = params or {} - """ - Convert functions from numpy to numpy.ma. + @functools.wraps(func, assigned=set(functools.WRAPPER_ASSIGNMENTS) - {"__module__"}) + def wrapper(*args, **kwargs): + common_params = kwargs.keys() & params.keys() + extras = params | {p: kwargs.pop(p) for p in common_params} - Parameters - ---------- - _methodname : string - Name of the method to transform. + result = func.__call__(*args, **kwargs).view(MaskedArray) - """ - __doc__ = None + if "fill_value" in common_params: + result.fill_value = extras["fill_value"] + if "hardmask" in common_params: + result._hardmask = bool(extras["hardmask"]) + + return result - def __init__(self, funcname, np_ret, np_ma_ret, params=None): - self._func = getattr(np, funcname) - self.__doc__ = self.getdoc(np_ret, np_ma_ret) - self._extras = params or {} + # workaround for a doctest bug in Python 3.11 that incorrectly assumes `__code__` + # exists on wrapped functions + del wrapper.__wrapped__ - def getdoc(self, np_ret, np_ma_ret): - "Return the doc of the function (from the doc of the method)." - doc = getattr(self._func, '__doc__', None) - sig = get_object_signature(self._func) - if doc: - doc = self._replace_return_type(doc, np_ret, np_ma_ret) - # Add the signature of the function at the beginning of the doc - if sig: - sig = f"{self._func.__name__}{sig}\n" - doc = sig + doc - return doc + # `arange`, `empty`, `empty_like`, `frombuffer`, and `zeros` have no signature + try: + signature = inspect.signature(func) + except ValueError: + signature = inspect.Signature([ + inspect.Parameter('args', inspect.Parameter.VAR_POSITIONAL), + inspect.Parameter('kwargs', inspect.Parameter.VAR_KEYWORD), + ]) + + if params: + sig_params = list(signature.parameters.values()) + + # pop `**kwargs` if present + sig_kwargs = None + if sig_params[-1].kind is inspect.Parameter.VAR_KEYWORD: + sig_kwargs = sig_params.pop() + + # add new keyword-only parameters + for param_name, default in params.items(): + new_param = inspect.Parameter( + param_name, + inspect.Parameter.KEYWORD_ONLY, + default=default, + ) + sig_params.append(new_param) - def _replace_return_type(self, doc, np_ret, np_ma_ret): - """ - Replace documentation of ``np`` function's return type. 
+ # re-append `**kwargs` if it was present + if sig_kwargs: + sig_params.append(sig_kwargs) - Replaces it with the proper type for the ``np.ma`` function. + signature = signature.replace(parameters=sig_params) - Parameters - ---------- - doc : str - The documentation of the ``np`` method. - np_ret : str - The return type string of the ``np`` method that we want to - replace. (e.g. "out : ndarray") - np_ma_ret : str - The return type string of the ``np.ma`` method. - (e.g. "out : MaskedArray") - """ - if np_ret not in doc: - raise RuntimeError( - f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " - f"The documentation string for return type, {np_ret}, is not " - f"found in the docstring for `np.{self._func.__name__}`. " - f"Fix the docstring for `np.{self._func.__name__}` or " - "update the expected string for return type." - ) + wrapper.__signature__ = signature - return doc.replace(np_ret, np_ma_ret) + # __doc__ is None when using `python -OO ...` + if func.__doc__ is not None: + assert np_ret in func.__doc__, ( + f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " + f"The documentation string for return type, {np_ret}, is not " + f"found in the docstring for `np.{func.__name__}`. " + f"Fix the docstring for `np.{func.__name__}` or " + "update the expected string for return type." + ) + wrapper.__doc__ = inspect.cleandoc(func.__doc__).replace(np_ret, np_ma_ret) - def __call__(self, *args, **params): - # Find the common parameters to the call and the definition - _extras = self._extras - common_params = set(params).intersection(_extras) - # Drop the common parameters from the call - for p in common_params: - _extras[p] = params.pop(p) - # Get the result - result = self._func.__call__(*args, **params).view(MaskedArray) - if "fill_value" in common_params: - result.fill_value = _extras.get("fill_value", None) - if "hardmask" in common_params: - result._hardmask = bool(_extras.get("hard_mask", False)) - return result + return wrapper arange = _convert2ma( diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index f457f18d57bd..a5e8e41b7709 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,63 +1,107 @@ +# mypy: disable-error-code=no-untyped-def # pyright: reportIncompatibleMethodOverride=false -# ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 - -from collections.abc import Sequence -from typing import Any, Literal, Self, SupportsIndex, TypeAlias, overload +import datetime as dt +import types from _typeshed import Incomplete -from typing_extensions import TypeIs, TypeVar, deprecated +from collections.abc import Buffer, Callable, Sequence +from typing import ( + Any, + Concatenate, + Final, + Generic, + Literal, + Never, + NoReturn, + Self, + SupportsComplex, + SupportsFloat, + SupportsIndex, + SupportsInt, + Unpack, + final, + overload, + override, +) +from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( + _HasDType, _HasDTypeWithRealAndImag, _ModeKind, + _OrderACF, + _OrderCF, _OrderKACF, _PartitionKind, _SortKind, + _ToIndices, amax, amin, bool_, bytes_, - character, + complex128, complexfloating, datetime64, dtype, - dtypes, expand_dims, float64, floating, generic, + inexact, + int8, + int64, int_, + integer, intp, ndarray, + number, object_, signedinteger, str_, timedelta64, unsignedinteger, ) +from numpy._core.fromnumeric import _UFuncKwargs # type-check only from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, + _32Bit, + _64Bit, _AnyShape, _ArrayLike, _ArrayLikeBool_co, 
_ArrayLikeBytes_co, + _ArrayLikeComplex128_co, _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat64_co, _ArrayLikeFloat_co, _ArrayLikeInt, _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, _ArrayLikeStr_co, _ArrayLikeString_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _CharLike_co, + _DTypeLike, _DTypeLikeBool, + _DTypeLikeVoid, + _FloatLike_co, _IntLike_co, + _NestedSequence, _ScalarLike_co, _Shape, _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, + _TD64Like_co, ) +from numpy._typing._dtype_like import _VoidDTypeLike __all__ = [ "MAError", @@ -240,199 +284,882 @@ __all__ = [ "zeros_like", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +# the additional `Callable[...]` bound simplifies self-binding to the ufunc's callable signature +_UFuncT_co = TypeVar("_UFuncT_co", bound=np.ufunc | Callable[..., object], default=np.ufunc, covariant=True) + +type _RealNumber = np.floating | np.integer + +type _Ignored = object + # A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` -_MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _MaskedArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _Masked1D[ScalarT: np.generic] = MaskedArray[tuple[int], np.dtype[ScalarT]] + +type _MaskedArrayUInt_co = _MaskedArray[np.unsignedinteger | np.bool] +type _MaskedArrayInt_co = _MaskedArray[np.integer | np.bool] +type _MaskedArrayFloat64_co = _MaskedArray[np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool] +type _MaskedArrayFloat_co = _MaskedArray[np.floating | np.integer | np.bool] +type _MaskedArrayComplex128_co = _MaskedArray[np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool] +type _MaskedArrayComplex_co = _MaskedArray[np.inexact | np.integer | np.bool] +type _MaskedArrayNumber_co = _MaskedArray[np.number | np.bool] +type _MaskedArrayTD64_co = _MaskedArray[np.timedelta64 | np.integer | np.bool] + +type _ArrayInt_co = NDArray[np.integer | np.bool] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co +type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | np.character | np.number | np.timedelta64 | np.bool | None +type _ConvertibleToDT64 = dt.date | int | _CharLike_co | np.character | np.number | np.datetime64 | np.bool | None +type _ArangeScalar = _RealNumber | np.datetime64 | np.timedelta64 -MaskType = bool_ -nomask: bool_[Literal[False]] +type _NoMaskType = np.bool_[Literal[False]] # type of `np.False_` +type _MaskArray[ShapeT: _Shape] = np.ndarray[ShapeT, np.dtype[np.bool]] + +type _FillValue = complex | None # int | float | complex | None +type _FillValueCallable = Callable[[np.dtype | ArrayLike], _FillValue] +type _DomainCallable = Callable[..., NDArray[np.bool]] + +### + +MaskType = np.bool_ + 
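[Aside, not part of the patch] `_NoMaskType` above is the type of the `np.False_` singleton that `nomask` (declared just below) is bound to; a quick sanity check of the identities these aliases encode:

>>> import numpy as np
>>> np.ma.nomask is np.False_
True
>>> np.ma.getmask(np.ma.array([1, 2])) is np.ma.nomask    # unmasked data
True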
+nomask: Final[_NoMaskType] = ...

 class MaskedArrayFutureWarning(FutureWarning): ...
 class MAError(Exception): ...
 class MaskError(MAError): ...

-def default_fill_value(obj): ...
-def minimum_fill_value(obj): ...
-def maximum_fill_value(obj): ...
-def set_fill_value(a, fill_value): ...
-def common_fill_value(a, b): ...
+# not generic at runtime
+class _MaskedUFunc(Generic[_UFuncT_co]):
+    f: _UFuncT_co  # readonly
+    def __init__(self, /, ufunc: _UFuncT_co) -> None: ...
+
+# not generic at runtime
+class _MaskedUnaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]):
+    fill: Final[_FillValue]
+    domain: Final[_DomainCallable | None]
+
+    def __init__(self, /, mufunc: _UFuncT_co, fill: _FillValue = 0, domain: _DomainCallable | None = None) -> None: ...
+
+    # NOTE: Overloaded callable signatures might not work on pyright, which is a
+    # long-standing issue unique to pyright:
+    # https://github.com/microsoft/pyright/issues/9663
+    # https://github.com/microsoft/pyright/issues/10849
+    # https://github.com/microsoft/pyright/issues/10899
+    # https://github.com/microsoft/pyright/issues/11049
+    def __call__[**Tss, T](
+        self: _MaskedUnaryOperation[Callable[Concatenate[Any, Tss], T]],
+        /,
+        a: ArrayLike,
+        *args: Tss.args,
+        **kwargs: Tss.kwargs,
+    ) -> T: ...
+
+# not generic at runtime
+class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]):
+    fillx: Final[_FillValue]
+    filly: Final[_FillValue]
+
+    def __init__(self, /, mbfunc: _UFuncT_co, fillx: _FillValue = 0, filly: _FillValue = 0) -> None: ...
+
+    # NOTE: See the comment in `_MaskedUnaryOperation.__call__`
+    def __call__[**Tss, T](
+        self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]],
+        /,
+        a: ArrayLike,
+        b: ArrayLike,
+        *args: Tss.args,
+        **kwargs: Tss.kwargs,
+    ) -> T: ...
+
+    # NOTE: We cannot meaningfully annotate the return (d)types of these methods until
+    # the signatures of the corresponding `numpy.ufunc` methods are specified.
+    def reduce(self, /, target: ArrayLike, axis: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Incomplete: ...
+    def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ...
+    def accumulate(self, /, target: ArrayLike, axis: SupportsIndex = 0) -> _MaskedArray[Incomplete]: ...
+
+# not generic at runtime
+class _DomainedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]):
+    domain: Final[_DomainCallable]
+    fillx: Final[_FillValue]
+    filly: Final[_FillValue]
+
+    def __init__(
+        self,
+        /,
+        dbfunc: _UFuncT_co,
+        domain: _DomainCallable,
+        fillx: _FillValue = 0,
+        filly: _FillValue = 0,
+    ) -> None: ...
+
+    # NOTE: See the comment in `_MaskedUnaryOperation.__call__`
+    def __call__[**Tss, T](
+        self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]],
+        /,
+        a: ArrayLike,
+        b: ArrayLike,
+        *args: Tss.args,
+        **kwargs: Tss.kwargs,
+    ) -> T: ...
+
+# not generic at runtime
+class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]):
+    compare: Final[_MaskedBinaryOperation]
+    fill_value_func: Final[_FillValueCallable]
+
+    def __init__(
+        self,
+        /,
+        ufunc: _UFuncT_co,
+        compare: _MaskedBinaryOperation,
+        fill_value: _FillValueCallable,
+    ) -> None: ...
+
+    # NOTE: This class is only used internally for `maximum` and `minimum`, so we are
+    # able to annotate the `__call__` method specifically for those two functions.
+    @overload
+    def __call__[ScalarT: np.generic](self, /, a: _ArrayLike[ScalarT], b: _ArrayLike[ScalarT]) -> _MaskedArray[ScalarT]: ...
+ @overload + def __call__(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. + def reduce(self, /, target: ArrayLike, axis: SupportsIndex | _NoValueType = ...) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + +@final +class _MaskedPrintOption: + _display: str + _enabled: bool | Literal[0, 1] + def __init__(self, /, display: str) -> None: ... + def display(self, /) -> str: ... + def set_display(self, /, s: str) -> None: ... + def enabled(self, /) -> bool: ... + def enable(self, /, shrink: bool | Literal[0, 1] = 1) -> None: ... + +masked_print_option: Final[_MaskedPrintOption] = ... + +exp: _MaskedUnaryOperation = ... +conjugate: _MaskedUnaryOperation = ... +sin: _MaskedUnaryOperation = ... +cos: _MaskedUnaryOperation = ... +arctan: _MaskedUnaryOperation = ... +arcsinh: _MaskedUnaryOperation = ... +sinh: _MaskedUnaryOperation = ... +cosh: _MaskedUnaryOperation = ... +tanh: _MaskedUnaryOperation = ... +abs: _MaskedUnaryOperation = ... +absolute: _MaskedUnaryOperation = ... +angle: _MaskedUnaryOperation = ... +fabs: _MaskedUnaryOperation = ... +negative: _MaskedUnaryOperation = ... +floor: _MaskedUnaryOperation = ... +ceil: _MaskedUnaryOperation = ... +around: _MaskedUnaryOperation = ... +logical_not: _MaskedUnaryOperation = ... +sqrt: _MaskedUnaryOperation = ... +log: _MaskedUnaryOperation = ... +log2: _MaskedUnaryOperation = ... +log10: _MaskedUnaryOperation = ... +tan: _MaskedUnaryOperation = ... +arcsin: _MaskedUnaryOperation = ... +arccos: _MaskedUnaryOperation = ... +arccosh: _MaskedUnaryOperation = ... +arctanh: _MaskedUnaryOperation = ... + +add: _MaskedBinaryOperation = ... +subtract: _MaskedBinaryOperation = ... +multiply: _MaskedBinaryOperation = ... +arctan2: _MaskedBinaryOperation = ... +equal: _MaskedBinaryOperation = ... +not_equal: _MaskedBinaryOperation = ... +less_equal: _MaskedBinaryOperation = ... +greater_equal: _MaskedBinaryOperation = ... +less: _MaskedBinaryOperation = ... +greater: _MaskedBinaryOperation = ... +logical_and: _MaskedBinaryOperation = ... +def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_or: _MaskedBinaryOperation = ... +def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_xor: _MaskedBinaryOperation = ... +bitwise_and: _MaskedBinaryOperation = ... +bitwise_or: _MaskedBinaryOperation = ... +bitwise_xor: _MaskedBinaryOperation = ... +hypot: _MaskedBinaryOperation = ... + +divide: _DomainedBinaryOperation = ... +true_divide: _DomainedBinaryOperation = ... +floor_divide: _DomainedBinaryOperation = ... +remainder: _DomainedBinaryOperation = ... +fmod: _DomainedBinaryOperation = ... +mod: _DomainedBinaryOperation = ... + +# `obj` can be anything (even `object()`), and is too "flexible", so we can't +# meaningfully annotate it, or its return type. +def default_fill_value(obj: object) -> Any: ... +def minimum_fill_value(obj: object) -> Any: ... +def maximum_fill_value(obj: object) -> Any: ... + +# +@overload # returns `a.fill_value` if `a` is a `MaskedArray` +def get_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT]) -> ScalarT: ... +@overload # otherwise returns `default_fill_value(a)` +def get_fill_value(a: object) -> Any: ... 
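[Aside, not part of the patch] A runtime sketch of the two `get_fill_value` overloads above (the function lives in `numpy.ma.core`): a `MaskedArray` reports its own `fill_value`, anything else falls through to `default_fill_value`:

>>> import numpy as np
>>> m = np.ma.array([1.0, 2.0], mask=[0, 1], fill_value=-1.0)
>>> print(np.ma.core.get_fill_value(m))
-1.0
>>> print(np.ma.core.get_fill_value([1.0]))
1e+20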
+ +# this is a noop if `a` isn't a `MaskedArray`, so we only accept `MaskedArray` input +def set_fill_value(a: MaskedArray, fill_value: _ScalarLike_co) -> None: ... + +# the return type depends on the *values* of `a` and `b` (which cannot be known +# statically), which is why we need to return an awkward `_ | None` @overload -def filled(a: ndarray[_ShapeT_co, _DTypeT_co], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... +def common_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT], b: MaskedArray) -> ScalarT | None: ... @overload -def filled(a: _ArrayLike[_ScalarT_co], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT_co]: ... +def common_fill_value(a: object, b: object) -> Any: ... + +# keep in sync with `fix_invalid`, but return `ndarray` instead of `MaskedArray` @overload -def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Any]: ... -def getdata(a, subok=...): ... -get_data = getdata +def filled[ShapeT: _Shape, DTypeT: np.dtype]( + a: ndarray[ShapeT, DTypeT], + fill_value: _ScalarLike_co | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload +def filled[ScalarT: np.generic](a: _ArrayLike[ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[ScalarT]: ... +@overload +def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Incomplete]: ... -def fix_invalid(a, mask=..., copy=..., fill_value=...): ... - -class _MaskedUFunc: - f: Any - __doc__: Any - __name__: Any - def __init__(self, ufunc): ... - -class _MaskedUnaryOperation(_MaskedUFunc): - fill: Any - domain: Any - def __init__(self, mufunc, fill=..., domain=...): ... - def __call__(self, a, *args, **kwargs): ... - -class _MaskedBinaryOperation(_MaskedUFunc): - fillx: Any - filly: Any - def __init__(self, mbfunc, fillx=..., filly=...): ... - def __call__(self, a, b, *args, **kwargs): ... - def reduce(self, target, axis=..., dtype=...): ... - def outer(self, a, b): ... - def accumulate(self, target, axis=...): ... - -class _DomainedBinaryOperation(_MaskedUFunc): - domain: Any - fillx: Any - filly: Any - def __init__(self, dbfunc, domain, fillx=..., filly=...): ... - def __call__(self, a, b, *args, **kwargs): ... - -exp: _MaskedUnaryOperation -conjugate: _MaskedUnaryOperation -sin: _MaskedUnaryOperation -cos: _MaskedUnaryOperation -arctan: _MaskedUnaryOperation -arcsinh: _MaskedUnaryOperation -sinh: _MaskedUnaryOperation -cosh: _MaskedUnaryOperation -tanh: _MaskedUnaryOperation -abs: _MaskedUnaryOperation -absolute: _MaskedUnaryOperation -angle: _MaskedUnaryOperation -fabs: _MaskedUnaryOperation -negative: _MaskedUnaryOperation -floor: _MaskedUnaryOperation -ceil: _MaskedUnaryOperation -around: _MaskedUnaryOperation -logical_not: _MaskedUnaryOperation -sqrt: _MaskedUnaryOperation -log: _MaskedUnaryOperation -log2: _MaskedUnaryOperation -log10: _MaskedUnaryOperation -tan: _MaskedUnaryOperation -arcsin: _MaskedUnaryOperation -arccos: _MaskedUnaryOperation -arccosh: _MaskedUnaryOperation -arctanh: _MaskedUnaryOperation - -add: _MaskedBinaryOperation -subtract: _MaskedBinaryOperation -multiply: _MaskedBinaryOperation -arctan2: _MaskedBinaryOperation -equal: _MaskedBinaryOperation -not_equal: _MaskedBinaryOperation -less_equal: _MaskedBinaryOperation -greater_equal: _MaskedBinaryOperation -less: _MaskedBinaryOperation -greater: _MaskedBinaryOperation -logical_and: _MaskedBinaryOperation -def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... 
-logical_or: _MaskedBinaryOperation -def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... -logical_xor: _MaskedBinaryOperation -bitwise_and: _MaskedBinaryOperation -bitwise_or: _MaskedBinaryOperation -bitwise_xor: _MaskedBinaryOperation -hypot: _MaskedBinaryOperation +# keep in sync with `filled`, but return `MaskedArray` instead of `ndarray` +@overload +def fix_invalid[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> MaskedArray[ShapeT, DTypeT]: ... +@overload +def fix_invalid[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def fix_invalid( + a: ArrayLike, + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[Incomplete]: ... + +# +def get_masked_subclass(*arrays: object) -> type[MaskedArray]: ... -divide: _DomainedBinaryOperation -true_divide: _DomainedBinaryOperation -floor_divide: _DomainedBinaryOperation -remainder: _DomainedBinaryOperation -fmod: _DomainedBinaryOperation -mod: _DomainedBinaryOperation +# +@overload +def getdata[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + subok: bool = True, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload +def getdata[ScalarT: np.generic](a: _ArrayLike[ScalarT], subok: bool = True) -> NDArray[ScalarT]: ... +@overload +def getdata(a: ArrayLike, subok: bool = True) -> NDArray[Incomplete]: ... -def make_mask_descr(ndtype): ... +get_data = getdata +# @overload -def getmask(a: _ScalarLike_co) -> bool_: ... +def getmask(a: _ScalarLike_co) -> _NoMaskType: ... @overload -def getmask(a: MaskedArray[_ShapeT_co, Any]) -> np.ndarray[_ShapeT_co, dtype[bool_]] | bool_: ... +def getmask[ShapeT: _Shape](a: MaskedArray[ShapeT, Any]) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload -def getmask(a: ArrayLike) -> NDArray[bool_] | bool_: ... +def getmask(a: ArrayLike) -> _MaskArray[_AnyShape] | _NoMaskType: ... get_mask = getmask -def getmaskarray(arr): ... +# like `getmask`, but instead of `nomask` returns `make_mask_none(arr, arr.dtype?)` +@overload +def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ... +@overload +def getmaskarray[ShapeT: _Shape](arr: np.ndarray[ShapeT, Any]) -> _MaskArray[ShapeT]: ... # It's sufficient for `m` to have dtype with type: `type[np.bool_]`, # which isn't necessarily a ndarray. Please open an issue if this causes issues. def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... -def make_mask(m, copy=..., shrink=..., dtype=...): ... -def make_mask_none(newshape, dtype=...): ... -def mask_or(m1, m2, copy=..., shrink=...): ... -def flatten_mask(mask): ... -def masked_where(condition, a, copy=...): ... -def masked_greater(x, value, copy=...): ... -def masked_greater_equal(x, value, copy=...): ... -def masked_less(x, value, copy=...): ... -def masked_less_equal(x, value, copy=...): ... -def masked_not_equal(x, value, copy=...): ... -def masked_equal(x, value, copy=...): ... -def masked_inside(x, v1, v2, copy=...): ... -def masked_outside(x, v1, v2, copy=...): ... -def masked_object(x, value, copy=..., shrink=...): ... -def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... -def masked_invalid(a, copy=...): ... 
+# +@overload +def make_mask_descr(ndtype: _VoidDTypeLike) -> np.dtype[np.void]: ... +@overload +def make_mask_descr(ndtype: _DTypeLike[np.generic] | str | type) -> np.dtype[np.bool_]: ... + +# +@overload # m is nomask +def make_mask( + m: _NoMaskType, + copy: bool = False, + shrink: bool = True, + dtype: _DTypeLikeBool = ..., +) -> _NoMaskType: ... +@overload # m: ndarray, shrink=True (default), dtype: bool-like (default) +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[ShapeT] | _NoMaskType: ... +@overload # m: ndarray, shrink=False (kwarg), dtype: bool-like (default) +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[ShapeT]: ... +@overload # m: ndarray, dtype: void-like +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... +@overload # m: array-like, shrink=True (default), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_AnyShape] | _NoMaskType: ... +@overload # m: array-like, shrink=False (kwarg), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_AnyShape]: ... +@overload # m: array-like, dtype: void-like +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> NDArray[np.void]: ... +@overload # fallback +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: DTypeLike = ..., +) -> NDArray[Incomplete] | _NoMaskType: ... -class _MaskedPrintOption: - def __init__(self, display): ... - def display(self): ... - def set_display(self, s): ... - def enabled(self): ... - def enable(self, shrink=...): ... +# +@overload # known shape, dtype: unstructured (default) +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[ShapeT]: ... +@overload # known shape, dtype: structured +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[ShapeT, dtype[np.void]]: ... +@overload # unknown shape, dtype: unstructured (default) +def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray[_AnyShape]: ... +@overload # unknown shape, dtype: structured +def make_mask_none(newshape: _ShapeLike, dtype: _VoidDTypeLike) -> NDArray[np.void]: ... + +# +@overload # nomask, scalar-like, shrink=True (default) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... +@overload # nomask, scalar-like, shrink=False (kwarg) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[tuple[()]]: ... +@overload # scalar-like, nomask, shrink=True (default) +def mask_or( + m1: _ScalarLike_co, + m2: _NoMaskType | Literal[False], + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... 
+@overload  # scalar-like, nomask, shrink=False (kwarg)
+def mask_or(
+    m1: _ScalarLike_co,
+    m2: _NoMaskType | Literal[False],
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+) -> _MaskArray[tuple[()]]: ...
+@overload  # ndarray, ndarray | nomask, shrink=True (default)
+def mask_or[ShapeT: _Shape, ScalarT: np.generic](
+    m1: np.ndarray[ShapeT, np.dtype[ScalarT]],
+    m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False],
+    copy: bool = False,
+    shrink: Literal[True] = True,
+) -> _MaskArray[ShapeT] | _NoMaskType: ...
+@overload  # ndarray, ndarray | nomask, shrink=False (kwarg)
+def mask_or[ShapeT: _Shape, ScalarT: np.generic](
+    m1: np.ndarray[ShapeT, np.dtype[ScalarT]],
+    m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False],
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+) -> _MaskArray[ShapeT]: ...
+@overload  # ndarray | nomask, ndarray, shrink=True (default)
+def mask_or[ShapeT: _Shape, ScalarT: np.generic](
+    m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False],
+    m2: np.ndarray[ShapeT, np.dtype[ScalarT]],
+    copy: bool = False,
+    shrink: Literal[True] = True,
+) -> _MaskArray[ShapeT] | _NoMaskType: ...
+@overload  # ndarray | nomask, ndarray, shrink=False (kwarg)
+def mask_or[ShapeT: _Shape, ScalarT: np.generic](
+    m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False],
+    m2: np.ndarray[ShapeT, np.dtype[ScalarT]],
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+) -> _MaskArray[ShapeT]: ...
+
+#
+@overload
+def flatten_mask[ShapeT: _Shape](mask: np.ndarray[ShapeT]) -> _MaskArray[ShapeT]: ...
+@overload
+def flatten_mask(mask: ArrayLike) -> _MaskArray[_AnyShape]: ...
+
+# NOTE: we currently don't know the field types of `void` dtypes, so it's not possible
+# to know the output dtype of the returned array.
+@overload
+def flatten_structured_array[ShapeT: _Shape](a: MaskedArray[ShapeT, np.dtype[np.void]]) -> MaskedArray[ShapeT]: ...
+@overload
+def flatten_structured_array[ShapeT: _Shape](a: np.ndarray[ShapeT, np.dtype[np.void]]) -> np.ndarray[ShapeT]: ...
+@overload  # for some reason this accepts unstructured array-likes, hence this fallback overload
+def flatten_structured_array(a: ArrayLike) -> np.ndarray: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_invalid[ShapeT: _Shape, DTypeT: np.dtype](
+    a: ndarray[ShapeT, DTypeT],
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_invalid[ScalarT: np.generic](a: _ArrayLike[ScalarT], copy: bool = True) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_invalid(a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_where[ShapeT: _Shape, DTypeT: np.dtype](
+    condition: _ArrayLikeBool_co,
+    a: ndarray[ShapeT, DTypeT],
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_where[ScalarT: np.generic](
+    condition: _ArrayLikeBool_co,
+    a: _ArrayLike[ScalarT],
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_where(condition: _ArrayLikeBool_co, a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
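[Aside, not part of the patch] The runtime behaviour behind the `masked_where` overloads above, for reference:

>>> import numpy as np
>>> a = np.arange(4)
>>> print(np.ma.masked_where(a > 2, a))
[0 1 2 --]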
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_greater[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_greater[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_greater(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_greater_equal[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_greater_equal[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_greater_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_less[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_less[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_less(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_less_equal[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_less_equal[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_less_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_not_equal[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_not_equal[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_not_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_equal[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_equal[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
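[Aside, not part of the patch] A hypothetical type-checking sketch of the first overload shared by these `masked_*` functions, which preserves the input's shape and dtype parameters (the exact spelling a checker reveals may vary; `typing.reveal_type` needs Python 3.11+ and also prints the runtime type when executed):

import numpy as np
from typing import reveal_type

x = np.zeros((2, 3), dtype=np.float32)
m = np.ma.masked_equal(x, 0.0)
reveal_type(m)  # mypy/pyright: roughly MaskedArray[tuple[int, int], dtype[float32]]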
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_inside[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    v1: ArrayLike,
+    v2: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_inside[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    v1: ArrayLike,
+    v2: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_inside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_outside[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    v1: ArrayLike,
+    v2: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_outside[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    v1: ArrayLike,
+    v2: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload  # unknown array-like
+def masked_outside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# only intended for object arrays, so we assume that's how it's always used in practice
+@overload
+def masked_object[ShapeT: _Shape](
+    x: np.ndarray[ShapeT, np.dtype[np.object_]],
+    value: object,
+    copy: bool = True,
+    shrink: bool = True,
+) -> MaskedArray[ShapeT, np.dtype[np.object_]]: ...
+@overload
+def masked_object(
+    x: _ArrayLikeObject_co,
+    value: object,
+    copy: bool = True,
+    shrink: bool = True,
+) -> _MaskedArray[np.object_]: ...
+
+# keep roughly in sync with `filled`
+@overload
+def masked_values[ShapeT: _Shape, DTypeT: np.dtype](
+    x: np.ndarray[ShapeT, DTypeT],
+    value: _ScalarLike_co,
+    rtol: float = 1e-5,
+    atol: float = 1e-8,
+    copy: bool = True,
+    shrink: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload
+def masked_values[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    value: _ScalarLike_co,
+    rtol: float = 1e-5,
+    atol: float = 1e-8,
+    copy: bool = True,
+    shrink: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload
+def masked_values(
+    x: ArrayLike,
+    value: _ScalarLike_co,
+    rtol: float = 1e-5,
+    atol: float = 1e-8,
+    copy: bool = True,
+    shrink: bool = True,
+) -> _MaskedArray[Incomplete]: ...
 
-masked_print_option: _MaskedPrintOption
+# TODO: Support non-boolean mask dtypes, such as `np.void`. This will require adding an
+# additional generic type parameter to (at least) `MaskedArray` and `MaskedIterator` to
+# hold the dtype of the mask.
 
-def flatten_structured_array(a): ...
+class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]):
+    ma: MaskedArray[_ShapeT_co, _DTypeT_co]  # readonly
+    dataiter: np.flatiter[ndarray[_ShapeT_co, _DTypeT_co]]  # readonly
+    maskiter: Final[np.flatiter[NDArray[np.bool]]]
 
-class MaskedIterator:
-    ma: Any
-    dataiter: Any
-    maskiter: Any
-    def __init__(self, ma): ...
-    def __iter__(self): ...
-    def __getitem__(self, indx): ...
-    def __setitem__(self, index, value): ...
-    def __next__(self): ...
+    def __init__(self, ma: MaskedArray[_ShapeT_co, _DTypeT_co]) -> None: ...
+    def __iter__(self) -> Self: ...
+
+    # Similar to `MaskedArray.__getitem__` but without the `void` case.
+    @overload
+    def __getitem__(self, indx: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ...
+ @overload + def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Incomplete: ... + @overload + def __getitem__(self, indx: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # Similar to `ndarray.__setitem__` but without the `void` case. + @overload # flexible | object_ | bool + def __setitem__( + self: MaskedIterator[Any, dtype[np.flexible | object_ | np.bool] | np.dtypes.StringDType], + index: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: MaskedIterator[Any, dtype[integer]], + index: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: MaskedIterator[Any, dtype[floating]], + index: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: MaskedIterator[Any, dtype[complexfloating]], + index: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: MaskedIterator[Any, dtype[timedelta64]], + index: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: MaskedIterator[Any, dtype[datetime64]], + index: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # catch-all + def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... + + # TODO: Returns `mvoid[(), _DTypeT_co]` for masks with `np.void` dtype. + def __next__[ScalarT: np.generic](self: MaskedIterator[Any, np.dtype[ScalarT]]) -> ScalarT: ... class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): - __array_priority__: Any - def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def view(self, dtype=..., type=..., fill_value=...): ... - def __getitem__(self, indx): ... - def __setitem__(self, indx, value): ... + __array_priority__: Final[Literal[15]] = 15 + + @overload + def __new__[ScalarT: np.generic]( + cls, + data: _ArrayLike[ScalarT], + mask: _ArrayLikeBool_co = nomask, + dtype: None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __new__[ScalarT: np.generic]( + cls, + data: object, + mask: _ArrayLikeBool_co, + dtype: _DTypeLike[ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[ScalarT]: ... 
+ @overload + def __new__[ScalarT: np.generic]( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + *, + dtype: _DTypeLike[ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __new__( + cls, + data: object = None, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[Any]: ... + + def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( + self, + obj: ndarray[ShapeT, DTypeT], + context: tuple[np.ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> MaskedArray[ShapeT, DTypeT]: ... + + @overload # type: ignore[override] # () + def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... + @overload # (dtype: DTypeT) + def view[DTypeT: np.dtype]( + self, + /, + dtype: DTypeT | _HasDType[DTypeT], + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, DTypeT]: ... + @overload # (dtype: dtype[ScalarT]) + def view[ScalarT: np.generic]( + self, + /, + dtype: _DTypeLike[ScalarT], + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # ([dtype: _, ]*, type: ArrayT) + def view[ArrayT: np.ndarray]( + self, + /, + dtype: DTypeLike | None = None, + *, + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view[ArrayT: np.ndarray]( + self, + /, + dtype: DTypeLike | None, + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... + @overload # (dtype: ArrayT, /) + def view[ArrayT: np.ndarray]( + self, + /, + dtype: type[ArrayT], + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... + @overload # (dtype: ?) + def view( + self, + /, + # `_VoidDTypeLike | str | None` is like `DTypeLike` but without `_DTypeLike[Any]` to avoid + # overlaps with previous overloads. + dtype: _VoidDTypeLike | str | None, + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, np.dtype]: ... + + # Keep in sync with `ndarray.__getitem__` + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self: _MaskedArray[np.void], indx: str, /) -> MaskedArray[_ShapeT_co]: ... + @overload + def __getitem__(self: _MaskedArray[np.void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[np.void]]: ... + @property def shape(self) -> _ShapeT_co: ... - @shape.setter - def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + @shape.setter # type: ignore[override] + def shape[ShapeT: _Shape](self: MaskedArray[ShapeT, Any], shape: ShapeT, /) -> None: ... + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... 
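[Aside, not part of the patch] The `__new__` overloads above propagate an explicit `dtype=` into the returned scalar type; at runtime:

>>> import numpy as np
>>> m = np.ma.MaskedArray([1, 2, 3], mask=[0, 1, 0], dtype=np.int16)
>>> print(m)
[1 -- 3]
>>> m.dtype
dtype('int16')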
@property - def mask(self) -> NDArray[MaskType] | MaskType: ... + def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @mask.setter def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property - def recordmask(self): ... + def recordmask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @recordmask.setter - def recordmask(self, mask): ... + def recordmask(self, mask: Never, /) -> NoReturn: ... def harden_mask(self) -> Self: ... def soften_mask(self) -> Self: ... @property @@ -441,323 +1168,1025 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def sharedmask(self) -> bool: ... def shrink_mask(self) -> Self: ... + @property - def baseclass(self) -> type[NDArray[Any]]: ... - data: Any + def baseclass(self) -> type[ndarray]: ... + + @property + def _data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @property - def flat(self): ... + def data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... # type: ignore[override] + + @property # type: ignore[override] + def flat(self) -> MaskedIterator[_ShapeT_co, _DTypeT_co]: ... @flat.setter - def flat(self, value): ... + def flat(self, value: ArrayLike, /) -> None: ... + @property - def fill_value(self): ... + def fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... @fill_value.setter - def fill_value(self, value=...): ... - get_fill_value: Any - set_fill_value: Any + def fill_value(self, value: _ScalarLike_co | None = None, /) -> None: ... + + def get_fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... + def set_fill_value(self, /, value: _ScalarLike_co | None = None) -> None: ... + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... - def compress(self, condition, axis=..., out=...): ... - def __eq__(self, other): ... - def __ne__(self, other): ... + + # keep roughly in sync with `ma.core.compress`, but swap the first two arguments + @overload # type: ignore[override] + def compress[ArrayT: np.ndarray]( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None, + out: ArrayT, + ) -> ArrayT: ... + @overload + def compress[ArrayT: np.ndarray]( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: None = None, + out: None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + out: None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Incomplete, /) -> Incomplete: ... + def __ne__(self, other: Incomplete, /) -> Incomplete: ... + def __ge__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] - def __add__(self, other): ... - def __radd__(self, other): ... - def __sub__(self, other): ... - def __rsub__(self, other): ... - def __mul__(self, other): ... - def __rmul__(self, other): ... - def __truediv__(self, other): ... - def __rtruediv__(self, other): ... 
- def __floordiv__(self, other): ... - def __rfloordiv__(self, other): ... - def __pow__(self, other, mod: None = None, /): ... - def __rpow__(self, other, mod: None = None, /): ... - - # Keep in sync with `ndarray.__iadd__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. - @overload - def __iadd__( - self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: MaskedArray[Any, dtype[str_] | dtypes.StringDType], - other: _ArrayLikeStr_co | _ArrayLikeString_co, - /, - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__isub__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__add__` + @overload # type: ignore[override] + def __add__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __isub__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload - def __isub__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __isub__( - self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload - def __isub__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload - def __isub__( - self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __isub__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__imul__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. 
+ def __add__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... @overload - def __imul__( - self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __imul__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __imul__( - self: MaskedArray[Any, dtype[signedinteger] | dtype[character] | dtypes.StringDType], - other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __imul__( - self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __imul__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __imul__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__ifloordiv__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload - def __ifloordiv__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload - def __ifloordiv__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... @overload - def __ifloordiv__( - self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... @overload - def __ifloordiv__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__itruediv__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + def __add__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... @overload - def __itruediv__( - self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... @overload - def __itruediv__( - self: _MaskedArray[complexfloating], - other: _ArrayLikeComplex_co, + def __add__( + self: MaskedArray[Any, np.dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, /, - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... 
+ @overload + def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload - def __itruediv__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - # Keep in sync with `ndarray.__ipow__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__radd__` + @overload # type: ignore[override] # signature equivalent to __add__ + def __radd__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __ipow__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __radd__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload - def __ipow__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __ipow__( - self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __radd__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload - def __ipow__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload - def __ipow__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # - @property # type: ignore[misc] - def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... - get_imag: Any - @property # type: ignore[misc] - def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... - get_real: Any - - # keep in sync with `np.ma.count` + def __radd__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def count(self, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... + def __radd__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... @overload - def count(self, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... + def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def count(self, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... - - def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... - def reshape(self, *s, **kwargs): ... - def resize(self, newshape, refcheck=..., order=...): ... - def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... - def ids(self) -> tuple[int, int]: ... - def iscontiguous(self) -> bool: ... - + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... 
@overload - def all( - self, - axis: None = None, - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - ) -> bool_: ... + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def all( - self, - axis: _ShapeLike | None = None, - out: None = None, - *, - keepdims: Literal[True], - ) -> _MaskedArray[bool_]: ... + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def all( - self, - axis: _ShapeLike | None, - out: None, - keepdims: Literal[True], - ) -> _MaskedArray[bool_]: ... + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload - def all( - self, - axis: _ShapeLike | None = None, - out: None = None, - keepdims: bool | _NoValueType = ..., - ) -> bool_ | _MaskedArray[bool_]: ... + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload - def all( - self, - axis: _ShapeLike | None = None, - *, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... @overload - def all( - self, - axis: _ShapeLike | None, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... - + def __radd__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... @overload - def any( - self, - axis: None = None, - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - ) -> bool_: ... + def __radd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... @overload - def any( - self, - axis: _ShapeLike | None = None, - out: None = None, - *, - keepdims: Literal[True], - ) -> _MaskedArray[bool_]: ... + def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... @overload - def any( - self, - axis: _ShapeLike | None, - out: None, + def __radd__( + self: MaskedArray[Any, np.dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... + @overload + def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__sub__` + @overload # type: ignore[override] + def __sub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + @overload + def __sub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... 
+ @overload + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rsub__` + @overload # type: ignore[override] + def __rsub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + @overload + def __rsub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __rsub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
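+    # For example, under the overloads above `m - 1` keeps the dtype for
+    # `m: _MaskedArray[np.int8]`, while subtracting two boolean masked arrays is
+    # typed as `NoReturn`, matching the runtime TypeError for boolean subtraction.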
+ + # Keep in sync with `ndarray.__mul__` + @overload # type: ignore[override] + def __mul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + @overload + def __mul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... + @overload + def __mul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __mul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__( + self: MaskedArray[Any, dtype[np.character] | np.dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rmul__` + @overload # type: ignore[override] # signature equivalent to __mul__ + def __rmul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + @overload + def __rmul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... + @overload + def __rmul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... 
+ @overload + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rmul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__( + self: MaskedArray[Any, dtype[np.character] | np.dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__truediv__` + @overload # type: ignore[override] + def __truediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __truediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __truediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rtruediv__` + @overload # type: ignore[override] + def __rtruediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... 
+ @overload + def __rtruediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __rtruediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rtruediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[integer | floating], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rtruediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__floordiv__` + @overload # type: ignore[override] + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + @overload + def __floordiv__[ScalarT: _RealNumber](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... + @overload + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __floordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rfloordiv__` + @overload # type: ignore[override] + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... 
+ @overload + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: _ArrayLikeBool_co, + /, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... + @overload + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[floating | integer], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) + @overload # type: ignore[override] + def __pow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + @overload + def __pow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... + @overload + def __pow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __pow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
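+    # E.g. with the overloads above, `b ** b` for boolean masked arrays is expected
+    # to resolve to `_MaskedArray[int8]`, and `m ** 2` stays `float64` for
+    # `m: _MaskedArray[np.float64]`; only the `object_` overloads fall back to `Any`.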
+ + # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) + @overload # type: ignore[override] + def __rpow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + @overload + def __rpow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... + @overload + def __rpow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rpow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # + @property # type: ignore[misc] + def imag[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + def get_imag[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + + # + @property # type: ignore[misc] + def real[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + def get_real[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + + # keep in sync with `np.ma.count` + @overload + def count(self, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... + @overload + def count(self, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + + # Keep in sync with `ndarray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[()], _DTypeT_co]: ... 
+ @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape[ShapeT: _Shape]( + self, + shape: ShapeT, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[ShapeT, _DTypeT_co]: ... + @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int, int], _DTypeT_co]: ... + @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... # type: ignore[override] + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + def ids(self) -> tuple[int, int]: ... + def iscontiguous(self) -> bool: ... + + # Keep in sync with `ma.core.all` + @overload # type: ignore[override] + def all( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def all[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def all[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # Keep in sync with `ma.core.any` + @overload # type: ignore[override] + def any( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: None, keepdims: Literal[True], ) -> _MaskedArray[bool_]: ... @overload - def any( + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... 
+ @overload + def any[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def any[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # Keep in sync with `ndarray.trace` and `ma.core.trace` + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, + ) -> Any: ... + @overload + def trace[ArrayT: np.ndarray]( + self, # >= 2D MaskedArray + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + @overload + def trace[ArrayT: np.ndarray]( + self, # >= 2D MaskedArray + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... + + # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. + @overload + def dot(self, b: ArrayLike, out: None = None, strict: bool = False) -> _MaskedArray[Any]: ... + @overload + def dot[ArrayT: np.ndarray](self, b: ArrayLike, out: ArrayT, strict: bool = False) -> ArrayT: ... + + # Keep in sync with `ma.core.sum` + @overload # type: ignore[override] + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def sum[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def sum[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... + @overload # out: ndarray + def cumsum[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... + @overload + def cumsum[ArrayT: np.ndarray]( + self, + /, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + + # Keep in sync with `ma.core.prod` + @overload # type: ignore[override] + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def prod[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def prod[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + product = prod + + # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... + @overload # out: ndarray + def cumprod[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... 
+ @overload + def cumprod[ArrayT: np.ndarray]( + self, + /, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + + # Keep in sync with `ma.core.mean` + @overload # type: ignore[override] + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def mean[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def mean[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # keep roughly in sync with `ma.core.anom` + @overload + def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... + @overload + def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + @overload + def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + + # keep in sync with `std` and `ma.core.var` + @overload # type: ignore[override] + def var( self, axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, out: None = None, + ddof: float = 0, keepdims: bool | _NoValueType = ..., - ) -> bool_ | _MaskedArray[bool_]: ... + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... @overload - def any( + def var[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def var[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, + ddof: float = 0, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> ArrayT: ... + + # keep in sync with `var` and `ma.core.std` + @overload # type: ignore[override] + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... @overload - def any( + def std[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... - - def nonzero(self) -> tuple[_Array1D[intp], *tuple[_Array1D[intp], ...]]: ... - def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... - def dot(self, b, out=..., strict=...): ... - def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... - def cumsum(self, axis=..., dtype=..., out=...): ... - def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... - product: Any - def cumprod(self, axis=..., dtype=..., out=...): ... - def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... - def anom(self, axis=..., dtype=...): ... - def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def round(self, decimals=..., out=...): ... - def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ @overload + def std[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> ArrayT: ... + + # Keep in sync with `ndarray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ... + @overload + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ... + + def argsort( # type: ignore[override] + self, + axis: SupportsIndex | _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool = False, + ) -> _MaskedArray[intp]: ... # Keep in-sync with np.ma.argmin @overload # type: ignore[override] @@ -779,23 +2208,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def argmin( + def argmin[ArrayT: np.ndarray]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def argmin( + def argmin[ArrayT: np.ndarray]( self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in-sync with np.ma.argmax @overload # type: ignore[override] @@ -817,23 +2246,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def argmax( + def argmax[ArrayT: np.ndarray]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def argmax( + def argmax[ArrayT: np.ndarray]( self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # def sort( # type: ignore[override] @@ -849,13 +2278,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @overload # type: ignore[override] - def min( - self: _MaskedArray[_ScalarT], + def min[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def min( self, @@ -865,32 +2294,32 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ... ) -> Any: ... @overload - def min( + def min[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def min( + def min[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
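+    # Note: `fill_value` is substituted for masked entries before the reduction; for
+    # `min` it is expected to default to `minimum_fill_value(self)` (the largest
+    # representable value of the dtype), so masked entries can never win the comparison.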
# @overload # type: ignore[override] - def max( - self: _MaskedArray[_ScalarT], + def max[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def max( self, @@ -900,32 +2329,32 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ... ) -> Any: ... @overload - def max( + def max[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def max( + def max[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload - def ptp( - self: _MaskedArray[_ScalarT], + def ptp[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] = False, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def ptp( self, @@ -935,22 +2364,22 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool = False, ) -> Any: ... @overload - def ptp( + def ptp[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool = False, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def ptp( + def ptp[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool = False, - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload @@ -993,84 +2422,125 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): ) -> _MaskedArray[intp]: ... # Keep in-sync with np.ma.take - @overload - def take( # type: ignore[overload-overlap] - self: _MaskedArray[_ScalarT], + @overload # type: ignore[override] + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, - mode: _ModeKind = 'raise' - ) -> _ScalarT: ... + mode: _ModeKind = "raise" + ) -> ScalarT: ... @overload - def take( - self: _MaskedArray[_ScalarT], + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', - ) -> _MaskedArray[_ScalarT]: ... + mode: _ModeKind = "raise", + ) -> _MaskedArray[ScalarT]: ... @overload - def take( + def take[ArrayT: np.ndarray]( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, - mode: _ModeKind = 'raise', - ) -> _ArrayT: ... + out: ArrayT, + mode: _ModeKind = "raise", + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: np.ndarray]( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, - mode: _ModeKind = 'raise', - ) -> _ArrayT: ... + out: ArrayT, + mode: _ModeKind = "raise", + ) -> ArrayT: ... - copy: Any - diagonal: Any - flatten: Any + # keep in sync with `ndarray.diagonal` + @override + def diagonal( + self, + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + # keep in sync with `ndarray.repeat` + @override @overload def repeat( self, + /, repeats: _ArrayLikeInt_co, axis: None = None, ) -> MaskedArray[tuple[int], _DTypeT_co]: ... 
@overload def repeat( self, + /, repeats: _ArrayLikeInt_co, axis: SupportsIndex, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... - squeeze: Any + # keep in sync with `ndarray.flatten` and `ndarray.ravel` + @override + def flatten(self, /, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + @override + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... - def swapaxes( + # keep in sync with `ndarray.squeeze` + @override + def squeeze( self, - axis1: SupportsIndex, - axis2: SupportsIndex, - / + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = None, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # - def toflex(self) -> Incomplete: ... - def torecords(self) -> Incomplete: ... - def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... - @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] + def toflex(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + def torecords(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + + # + @override def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] - def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... + + # keep in sync with `ndarray.tolist` + @override + @overload + def tolist[T](self: MaskedArray[tuple[Never], dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + @overload + def tolist[T](self: MaskedArray[tuple[()], dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> T: ... + @overload + def tolist[T](self: _Masked1D[np.generic[T]], /, fill_value: _ScalarLike_co | None = None) -> list[T]: ... + @overload + def tolist[T]( + self: MaskedArray[tuple[int, int], dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[T]]: ... + @overload + def tolist[T]( + self: MaskedArray[tuple[int, int, int], dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[list[T]]]: ... + @overload + def tolist(self, /, fill_value: _ScalarLike_co | None = None) -> Any: ... + + # NOTE: will raise `NotImplementedError` + @override + def tofile(self, /, fid: Never, sep: str = "", format: str = "%s") -> NoReturn: ... # type: ignore[override] # - def __reduce__(self): ... - def __deepcopy__(self, memo=...): ... + @override + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property def dtype(self) -> _DTypeT_co: ... @dtype.setter - def dtype(self: MaskedArray[_AnyShape, _DTypeT], dtype: _DTypeT, /) -> None: ... + def dtype[DTypeT: np.dtype](self: MaskedArray[_AnyShape, DTypeT], dtype: DTypeT, /) -> None: ... class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( @@ -1087,208 +2557,696 @@ class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __setitem__(self, indx, value): ... def __iter__(self): ... def __len__(self): ... - def filled(self, fill_value=...): ... - def tolist(self): ... + def filled(self, fill_value=None): ... + def tolist(self): ... # type: ignore[override] + +def isMaskedArray(x: object) -> TypeIs[MaskedArray]: ... +def isarray(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray +def isMA(x: object) -> TypeIs[MaskedArray]: ... 
# alias to isMaskedArray
+
+# 0D float64 array
+class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]):
+    def __new__(cls) -> Self: ...
+
+    # these overrides are no-ops
+    @override
+    def __iadd__(self, other: _Ignored, /) -> Self: ...  # type: ignore[override]
+    @override
+    def __isub__(self, other: _Ignored, /) -> Self: ...  # type: ignore[override]
+    @override
+    def __imul__(self, other: _Ignored, /) -> Self: ...  # type: ignore[override]
+    @override
+    def __ifloordiv__(self, other: _Ignored, /) -> Self: ...
+    @override
+    def __itruediv__(self, other: _Ignored, /) -> Self: ...  # type: ignore[override]
+    @override
+    def __ipow__(self, other: _Ignored, /) -> Self: ...  # type: ignore[override]
+    @override
+    def __deepcopy__(self, /, memo: _Ignored) -> Self: ...  # type: ignore[override]
+    @override
+    def copy(self, /, *args: _Ignored, **kwargs: _Ignored) -> Self: ...
+
+masked: Final[MaskedConstant] = ...
+masked_singleton: Final[MaskedConstant] = ...
+
+type masked_array = MaskedArray
+
+# keep in sync with `MaskedArray.__new__`
+@overload
+def array[ScalarT: np.generic](
+    data: _ArrayLike[ScalarT],
+    dtype: None = None,
+    copy: bool = False,
+    order: _OrderKACF | None = None,
+    mask: _ArrayLikeBool_co = nomask,
+    fill_value: _ScalarLike_co | None = None,
+    keep_mask: bool = True,
+    hard_mask: bool = False,
+    shrink: bool = True,
+    subok: bool = True,
+    ndmin: int = 0,
+) -> _MaskedArray[ScalarT]: ...
+@overload
+def array[ScalarT: np.generic](
+    data: object,
+    dtype: _DTypeLike[ScalarT],
+    copy: bool = False,
+    order: _OrderKACF | None = None,
+    mask: _ArrayLikeBool_co = nomask,
+    fill_value: _ScalarLike_co | None = None,
+    keep_mask: bool = True,
+    hard_mask: bool = False,
+    shrink: bool = True,
+    subok: bool = True,
+    ndmin: int = 0,
+) -> _MaskedArray[ScalarT]: ...
+@overload
+def array(
+    data: object,
+    dtype: DTypeLike | None = None,
+    copy: bool = False,
+    order: _OrderKACF | None = None,
+    mask: _ArrayLikeBool_co = nomask,
+    fill_value: _ScalarLike_co | None = None,
+    keep_mask: bool = True,
+    hard_mask: bool = False,
+    shrink: bool = True,
+    subok: bool = True,
+    ndmin: int = 0,
+) -> _MaskedArray[Any]: ...
+
+# keep in sync with `array`
+@overload
+def asarray[ScalarT: np.generic](
+    a: _ArrayLike[ScalarT],
+    dtype: None = None,
+    order: _OrderKACF | None = None,
+) -> _MaskedArray[ScalarT]: ...
+@overload
+def asarray[ScalarT: np.generic](
+    a: object,
+    dtype: _DTypeLike[ScalarT],
+    order: _OrderKACF | None = None,
+) -> _MaskedArray[ScalarT]: ...
+@overload
+def asarray(
+    a: object,
+    dtype: DTypeLike | None = None,
+    order: _OrderKACF | None = None,
+) -> _MaskedArray[Any]: ...
+
+# keep in sync with `asarray` (but note the additional first overload)
+@overload
+def asanyarray[MArrayT: MaskedArray](a: MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> MArrayT: ...
+@overload
+def asanyarray[ScalarT: np.generic](
+    a: _ArrayLike[ScalarT],
+    dtype: None = None,
+    order: _OrderKACF | None = None,
+) -> _MaskedArray[ScalarT]: ...
+@overload
+def asanyarray[ScalarT: np.generic](
+    a: object,
+    dtype: _DTypeLike[ScalarT],
+    order: _OrderKACF | None = None,
+) -> _MaskedArray[ScalarT]: ...
+@overload
+def asanyarray(
+    a: object,
+    dtype: DTypeLike | None = None,
+    order: _OrderKACF | None = None,
+) -> _MaskedArray[Any]: ...
+
+#
+def is_masked(x: object) -> bool: ...
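+# For example, `is_masked(masked)` and `is_masked(array([1], mask=[True]))` are both
+# expected to be True, while a fully unmasked `array([1])` gives False.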
+
+@overload
+def min[ScalarT: np.generic](
+    obj: _ArrayLike[ScalarT],
+    axis: None = None,
+    out: None = None,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: Literal[False] | _NoValueType = ...,
+) -> ScalarT: ...
+@overload
+def min(
+    obj: ArrayLike,
+    axis: _ShapeLike | None = None,
+    out: None = None,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> Any: ...
+@overload
+def min[ArrayT: np.ndarray](
+    obj: ArrayLike,
+    axis: _ShapeLike | None,
+    out: ArrayT,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> ArrayT: ...
+@overload
+def min[ArrayT: np.ndarray](
+    obj: ArrayLike,
+    axis: _ShapeLike | None = None,
+    *,
+    out: ArrayT,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> ArrayT: ...
+
+@overload
+def max[ScalarT: np.generic](
+    obj: _ArrayLike[ScalarT],
+    axis: None = None,
+    out: None = None,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: Literal[False] | _NoValueType = ...,
+) -> ScalarT: ...
+@overload
+def max(
+    obj: ArrayLike,
+    axis: _ShapeLike | None = None,
+    out: None = None,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> Any: ...
+@overload
+def max[ArrayT: np.ndarray](
+    obj: ArrayLike,
+    axis: _ShapeLike | None,
+    out: ArrayT,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> ArrayT: ...
+@overload
+def max[ArrayT: np.ndarray](
+    obj: ArrayLike,
+    axis: _ShapeLike | None = None,
+    *,
+    out: ArrayT,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> ArrayT: ...
+
+@overload
+def ptp[ScalarT: np.generic](
+    obj: _ArrayLike[ScalarT],
+    axis: None = None,
+    out: None = None,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: Literal[False] | _NoValueType = ...,
+) -> ScalarT: ...
+@overload
+def ptp(
+    obj: ArrayLike,
+    axis: _ShapeLike | None = None,
+    out: None = None,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> Any: ...
+@overload
+def ptp[ArrayT: np.ndarray](
+    obj: ArrayLike,
+    axis: _ShapeLike | None,
+    out: ArrayT,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> ArrayT: ...
+@overload
+def ptp[ArrayT: np.ndarray](
+    obj: ArrayLike,
+    axis: _ShapeLike | None = None,
+    *,
+    out: ArrayT,
+    fill_value: _ScalarLike_co | None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> ArrayT: ...
+
+# we cannot meaningfully annotate `_frommethod` further, because the callable signature
+# of the return type fully depends on the *value* of `methodname` and `reversed` in
+# a way that cannot be expressed in the Python type system.
+def _frommethod(methodname: str, reversed: bool = False) -> types.FunctionType: ...
+
+# NOTE: The following `*_mask` functions will accept any array-like input at
+# runtime, but since their use cases are specific to masks, they only accept
+# `MaskedArray` inputs.
+
+# keep in sync with `MaskedArray.harden_mask`
+def harden_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ...
+# keep in sync with `MaskedArray.soften_mask`
+def soften_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ...
+# keep in sync with `MaskedArray.shrink_mask`
+def shrink_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ...
+
+# keep in sync with `MaskedArray.ids`
+def ids(a: ArrayLike) -> tuple[int, int]: ...
+
+# keep in sync with `ndarray.nonzero`
+def nonzero(a: ArrayLike) -> tuple[_Array1D[np.intp], ...]: ...
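+
+# For reference, a rough sketch of how `_frommethod` is used at runtime in
+# `numpy/ma/core.py` (shown here only to motivate the annotations above):
+#
+#     mean = _frommethod("mean")
+#     nonzero = _frommethod("nonzero")
+#
+# Each generated function defers to the like-named `MaskedArray` method, which
+# is why the function stubs in this file mirror those method signatures.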
-def isMaskedArray(x): ...
-isarray = isMaskedArray
-isMA = isMaskedArray
+# keep first overload in sync with `MaskedArray.ravel`
+@overload
+def ravel[DTypeT: np.dtype](a: np.ndarray[Any, DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], DTypeT]: ...
+@overload
+def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Masked1D[ScalarT]: ...
+@overload
+def ravel(a: ArrayLike, order: _OrderKACF = "C") -> _Masked1D[Incomplete]: ...
+
+# keep roughly in sync with `lib._function_base_impl.copy`
+@overload
+def copy[MArrayT: MaskedArray](a: MArrayT, order: _OrderKACF = "C") -> MArrayT: ...
+@overload
+def copy[ShapeT: _Shape, DTypeT: np.dtype](
+    a: np.ndarray[ShapeT, DTypeT],
+    order: _OrderKACF = "C",
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload
+def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _MaskedArray[ScalarT]: ...
+@overload
+def copy(a: ArrayLike, order: _OrderKACF = "C") -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with `_core.fromnumeric.diagonal`
+@overload
+def diagonal[ScalarT: np.generic](
+    a: _ArrayLike[ScalarT],
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,
+) -> NDArray[ScalarT]: ...
+@overload
+def diagonal(
+    a: ArrayLike,
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,
+) -> NDArray[Incomplete]: ...
+
+# keep in sync with `_core.fromnumeric.repeat`
+@overload
+def repeat[ScalarT: np.generic](a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[ScalarT]: ...
+@overload
+def repeat[ScalarT: np.generic](
+    a: _ArrayLike[ScalarT],
+    repeats: _ArrayLikeInt_co,
+    axis: SupportsIndex,
+) -> _MaskedArray[ScalarT]: ...
+@overload
+def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[Incomplete]: ...
+@overload
+def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with `_core.fromnumeric.swapaxes`
+@overload
+def swapaxes[MArrayT: MaskedArray](a: MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> MArrayT: ...
+@overload
+def swapaxes[ScalarT: np.generic](
+    a: _ArrayLike[ScalarT],
+    axis1: SupportsIndex,
+    axis2: SupportsIndex,
+) -> _MaskedArray[ScalarT]: ...
+@overload
+def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ...
+
+# NOTE: The `MaskedArray.anom` definition is specific to `MaskedArray`, so we need
+# additional overloads to cover array-like inputs here.
+@overload  # a: MaskedArray, dtype=None
+def anom[MArrayT: MaskedArray](a: MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> MArrayT: ...
+@overload  # a: array-like, dtype=None
+def anom[ScalarT: np.generic](
+    a: _ArrayLike[ScalarT],
+    axis: SupportsIndex | None = None,
+    dtype: None = None,
+) -> _MaskedArray[ScalarT]: ...
+@overload  # a: unknown array-like, dtype: dtype-like (positional)
+def anom[ScalarT: np.generic](a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[ScalarT]) -> _MaskedArray[ScalarT]: ...
+@overload  # a: unknown array-like, dtype: dtype-like (keyword)
+def anom[ScalarT: np.generic](
+    a: ArrayLike,
+    axis: SupportsIndex | None = None,
+    *,
+    dtype: _DTypeLike[ScalarT],
+) -> _MaskedArray[ScalarT]: ...
+@overload  # a: unknown array-like, dtype: unknown dtype-like (positional)
+def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ...
+@overload # a: unknown array-like, dtype: unknown dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... + +anomalies = anom + +# Keep in sync with `any` and `MaskedArray.all` +@overload +def all(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all( + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# Keep in sync with `all` and `MaskedArray.any` +@overload +def any(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any( + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# NOTE: The `MaskedArray.compress` definition uses its `DTypeT_co` type parameter, +# which wouldn't work here for array-like inputs, so we need additional overloads. +@overload +def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, +) -> _Masked1D[ScalarT]: ... +@overload +def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> _Masked1D[Incomplete]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, +) -> _MaskedArray[Incomplete]: ... +@overload +def compress[ArrayT: np.ndarray](condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: ArrayT) -> ArrayT: ... +@overload +def compress[ArrayT: np.ndarray]( + condition: _ArrayLikeBool_co, + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... -# 0D float64 array -class MaskedConstant(MaskedArray[_AnyShape, dtype[float64]]): - def __new__(cls): ... - __class__: Any - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def __format__(self, format_spec): ... - def __reduce__(self): ... 
- def __iop__(self, other): ... - __iadd__: Any - __isub__: Any - __imul__: Any - __ifloordiv__: Any - __itruediv__: Any - __ipow__: Any - def copy(self, *args, **kwargs): ... - def __copy__(self): ... - def __deepcopy__(self, memo): ... - def __setattr__(self, attr, value): ... - -masked: MaskedConstant -masked_singleton: MaskedConstant -masked_array = MaskedArray - -def array( - data, - dtype=..., - copy=..., - order=..., - mask=..., - fill_value=..., - keep_mask=..., - hard_mask=..., - shrink=..., - subok=..., - ndmin=..., -): ... -def is_masked(x: object) -> bool: ... +# Keep in sync with `cumprod` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumsum[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... +@overload # out: ndarray (kwarg) +def cumsum[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... -class _extrema_operation(_MaskedUFunc): - compare: Any - fill_value_func: Any - def __init__(self, ufunc, compare, fill_value): ... - # NOTE: in practice `b` has a default value, but users should - # explicitly provide a value here as the default is deprecated - def __call__(self, a, b): ... - def reduce(self, target, axis=...): ... - def outer(self, a, b): ... +# Keep in sync with `cumsum` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumprod( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumprod[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... +@overload # out: ndarray (kwarg) +def cumprod[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... +# Keep in sync with `sum`, `prod`, `product`, and `MaskedArray.mean` @overload -def min( - obj: _ArrayLike[_ScalarT], - axis: None = None, +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, out: None = None, - fill_value: _ScalarLike_co | None = None, - keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... @overload -def min( - obj: ArrayLike, +def mean[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def mean[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# Keep in sync with `mean`, `prod`, `product`, and `MaskedArray.sum` +@overload +def sum( + a: ArrayLike, axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, out: None = None, - fill_value: _ScalarLike_co | None = None, - keepdims: bool | _NoValueType = ... -) -> Any: ... + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... 
@overload -def min( - obj: ArrayLike, +def sum[ArrayT: np.ndarray]( + a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, - fill_value: _ScalarLike_co | None = None, + dtype: DTypeLike | None, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def min( - obj: ArrayLike, +def sum[ArrayT: np.ndarray]( + a: ArrayLike, axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, - out: _ArrayT, - fill_value: _ScalarLike_co | None = None, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... +# Keep in sync with `product` and `MaskedArray.prod` @overload -def max( - obj: _ArrayLike[_ScalarT], - axis: None = None, +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, out: None = None, - fill_value: _ScalarLike_co | None = None, - keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... @overload -def max( - obj: ArrayLike, +def prod[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def prod[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# Keep in sync with `prod` and `MaskedArray.prod` +@overload +def product( + a: ArrayLike, axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, out: None = None, - fill_value: _ScalarLike_co | None = None, - keepdims: bool | _NoValueType = ... -) -> Any: ... + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... @overload -def max( - obj: ArrayLike, +def product[ArrayT: np.ndarray]( + a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, - fill_value: _ScalarLike_co | None = None, + dtype: DTypeLike | None, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def max( - obj: ArrayLike, +def product[ArrayT: np.ndarray]( + a: ArrayLike, axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, - out: _ArrayT, - fill_value: _ScalarLike_co | None = None, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... +# Keep in sync with `MaskedArray.trace` and `_core.fromnumeric.trace` @overload -def ptp( - obj: _ArrayLike[_ScalarT], - axis: None = None, +def trace( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, out: None = None, - fill_value: _ScalarLike_co | None = None, - keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> Incomplete: ... @overload -def ptp( - obj: ArrayLike, +def trace[ArrayT: np.ndarray]( + a: ArrayLike, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: ArrayT, +) -> ArrayT: ... +@overload +def trace[ArrayT: np.ndarray]( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def std( + a: ArrayLike, axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, out: None = None, - fill_value: _ScalarLike_co | None = None, - keepdims: bool | _NoValueType = ... -) -> Any: ... 
+ ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... @overload -def ptp( - obj: ArrayLike, +def std[ArrayT: np.ndarray]( + a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, - fill_value: _ScalarLike_co | None = None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def ptp( - obj: ArrayLike, +def std[ArrayT: np.ndarray]( + a: ArrayLike, axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, - out: _ArrayT, - fill_value: _ScalarLike_co | None = None, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def var[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... - -class _frommethod: - __name__: Any - __doc__: Any - reversed: Any - def __init__(self, methodname, reversed=...): ... - def getdoc(self): ... - def __call__(self, a, *args, **params): ... - -all: _frommethod -anomalies: _frommethod -anom: _frommethod -any: _frommethod -compress: _frommethod -cumprod: _frommethod -cumsum: _frommethod -copy: _frommethod -diagonal: _frommethod -harden_mask: _frommethod -ids: _frommethod -mean: _frommethod -nonzero: _frommethod -prod: _frommethod -product: _frommethod -ravel: _frommethod -repeat: _frommethod -soften_mask: _frommethod -std: _frommethod -sum: _frommethod -swapaxes: _frommethod -trace: _frommethod -var: _frommethod - -@overload -def count(self: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... -@overload -def count(self: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... -@overload -def count(self: ArrayLike, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... -@overload -def count(self: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> ArrayT: ... +@overload +def var[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> ArrayT: ... + +# (a, b) +minimum: _extrema_operation = ... +maximum: _extrema_operation = ... + +# NOTE: this is a `_frommethod` instance at runtime +@overload +def count(a: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... 
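+
+# Rough usage sketch for the `_extrema_operation` instances above (values and
+# variable names are illustrative):
+#
+#     >>> np.ma.minimum(x, y)      # element-wise minimum, honouring both masks
+#     >>> np.ma.minimum.reduce(x)  # overall minimum of `x`, skipping masked entries
+#
+# The two-argument call plus `reduce`/`outer` is why these are annotated as
+# `_extrema_operation` rather than as plain functions.
+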
+# NOTE: this is a `_frommethod` instance at runtime @overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -1297,7 +3255,7 @@ def argmin( ) -> intp: ... @overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -1305,28 +3263,28 @@ def argmin( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmin( - self: ArrayLike, +def argmin[ArrayT: np.ndarray]( + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def argmin( - self: ArrayLike, +def argmin[ArrayT: np.ndarray]( + a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... -# +# keep in sync with `argmin` @overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -1335,7 +3293,7 @@ def argmax( ) -> intp: ... @overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -1343,50 +3301,47 @@ def argmax( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmax( - self: ArrayLike, +def argmax[ArrayT: np.ndarray]( + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def argmax( - self: ArrayLike, +def argmax[ArrayT: np.ndarray]( + a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... - -minimum: _extrema_operation -maximum: _extrema_operation +) -> ArrayT: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, - mode: _ModeKind = 'raise' -) -> _ScalarT: ... + mode: _ModeKind = "raise", +) -> ScalarT: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', -) -> _MaskedArray[_ScalarT]: ... + mode: _ModeKind = "raise", +) -> _MaskedArray[ScalarT]: ... @overload def take( a: ArrayLike, indices: _IntLike_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> Any: ... @overload def take( @@ -1394,39 +3349,40 @@ def take( indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _MaskedArray[Any]: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, - mode: _ModeKind = 'raise', -) -> _ArrayT: ... + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, - mode: _ModeKind = 'raise', -) -> _ArrayT: ... + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... 
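+
+# Behavioural note for `argmin`/`argmax` above, summarising the runtime
+# semantics (the array below is illustrative): masked entries are first
+# replaced by `fill_value` -- by default the dtype's extreme value -- so that
+# they can never win the comparison:
+#
+#     >>> a = masked_array([3, 1, 2], mask=[False, True, False])
+#     >>> argmin(a)  # -> 2; the masked `1` is filled before comparing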
+ +def power(a, b, third=None): ... +def argsort(a, axis=..., kind=None, order=None, endwith=True, fill_value=None, *, stable=None): ... -def power(a, b, third=...): ... -def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... @overload -def sort( - a: _ArrayT, +def sort[ArrayT: np.ndarray]( + a: ArrayT, axis: SupportsIndex = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, endwith: bool | None = True, fill_value: _ScalarLike_co | None = None, *, - stable: Literal[False] | None = False, -) -> _ArrayT: ... + stable: Literal[False] | None = None, +) -> ArrayT: ... @overload def sort( a: ArrayLike, @@ -1436,28 +3392,30 @@ def sort( endwith: bool | None = True, fill_value: _ScalarLike_co | None = None, *, - stable: Literal[False] | None = False, + stable: Literal[False] | None = None, ) -> NDArray[Any]: ... + @overload -def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... +def compressed[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload def compressed(x: ArrayLike) -> _Array1D[Any]: ... -def concatenate(arrays, axis=...): ... -def diag(v, k=...): ... + +def concatenate(arrays, axis=0): ... +def diag(v, k=0): ... def left_shift(a, n): ... def right_shift(a, n): ... -def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = 'raise') -> None: ... +def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -def transpose(a, axes=...): ... -def reshape(a, new_shape, order=...): ... +def transpose(a, axes=None): ... +def reshape(a, new_shape, order="C"): ... def resize(x, new_shape): ... def ndim(obj: ArrayLike) -> int: ... def shape(obj): ... def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... -def diff(a, /, n=..., axis=..., prepend=..., append=...): ... +def diff(a, /, n=1, axis=-1, prepend=..., append=...): ... def where(condition, x=..., y=...): ... -def choose(indices, choices, out=..., mode=...): ... -def round_(a, decimals=..., out=...): ... +def choose(indices, choices, out=None, mode="raise"): ... +def round_(a, decimals=0, out=None): ... round = round_ def inner(a, b): ... @@ -1466,36 +3424,547 @@ innerproduct = inner def outer(a, b): ... outerproduct = outer -def correlate(a, v, mode=..., propagate_mask=...): ... -def convolve(a, v, mode=..., propagate_mask=...): ... +def correlate(a, v, mode="valid", propagate_mask=True): ... +def convolve(a, v, mode="full", propagate_mask=True): ... def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... -def asarray(a, dtype=..., order=...): ... -def asanyarray(a, dtype=...): ... def fromflex(fxarray): ... -class _convert2ma: - def __init__(self, /, funcname: str, np_ret: str, np_ma_ret: str, params: dict[str, Any] | None = None) -> None: ... - def __call__(self, /, *args: object, **params: object) -> Any: ... - def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... - -arange: _convert2ma -clip: _convert2ma -empty: _convert2ma -empty_like: _convert2ma -frombuffer: _convert2ma -fromfunction: _convert2ma -identity: _convert2ma -indices: _convert2ma -ones: _convert2ma -ones_like: _convert2ma -squeeze: _convert2ma -zeros: _convert2ma -zeros_like: _convert2ma - -def append(a, b, axis=...): ... 
-def dot(a, b, strict=..., out=...): ... -def mask_rowcols(a, axis=...): ... +def append(a, b, axis=None): ... +def dot(a, b, strict=False, out=None): ... + +# internal wrapper functions for the functions below +def _convert2ma( + funcname: str, + np_ret: str, + np_ma_ret: str, + params: dict[str, Any] | None = None, +) -> Callable[..., Any]: ... + +# keep in sync with `_core.multiarray.arange` +@overload # dtype= +def arange[ScalarT: _ArangeScalar]( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: _DTypeLike[ScalarT], + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[ScalarT]: ... +@overload # (int-like, int-like?, int-like?) +def arange( + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co | None = 1, + *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.int_]: ... +@overload # (float, float-like?, float-like?) +def arange( + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) +def arange( + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) +def arange( + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) +def arange( + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) +def arange( + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... 
+@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: DTypeLike | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[Incomplete]: ... + +# based on `_core.fromnumeric.clip` +@overload +def clip[ScalarT: np.generic]( + a: ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> ScalarT: ... +@overload +def clip[ScalarT: np.generic]( + a: NDArray[ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MaskedArray[ScalarT]: ... +@overload +def clip[MArrayT: MaskedArray]( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: MArrayT, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> MArrayT: ... +@overload +def clip[MArrayT: MaskedArray]( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: MArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> MArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> Incomplete: ... + +# keep in sync with `_core.multiarray.ones` +@overload +def empty( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64]: ... +@overload +def empty[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], DTypeT]: ... +@overload +def empty[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[ScalarT]: ... 
+@overload +def empty( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[Any]: ... +@overload # known shape +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[ShapeT, np.dtype[np.float64]]: ... +@overload +def empty[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[ShapeT, DTypeT]: ... +@overload +def empty[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[ShapeT]: ... +@overload # unknown shape +def empty[ShapeT: _Shape]( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.float64]: ... +@overload +def empty[DTypeT: np.dtype]( + shape: _ShapeLike, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShape, DTypeT]: ... +@overload +def empty[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[ScalarT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray: ... + +# keep in sync with `_core.multiarray.empty_like` +@overload +def empty_like[MArrayT: MaskedArray]( + a: MArrayT, + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> MArrayT: ... +@overload +def empty_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[ScalarT]: ... 
+@overload +def empty_like[ScalarT: np.generic]( + a: Incomplete, + /, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def empty_like( + a: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[Incomplete]: ... + +# This is a bit of a hack to avoid having to duplicate all those `empty` overloads for +# `ones` and `zeros`, that relies on the fact that empty/zeros/ones have identical +# type signatures, but may cause some type-checkers to report incorrect names in case +# of user errors. Mypy and Pyright seem to handle this just fine. +ones = empty +ones_like = empty_like +zeros = empty +zeros_like = empty_like + +# keep in sync with `_core.multiarray.frombuffer` +@overload +def frombuffer( + buffer: Buffer, + *, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[np.float64]: ... +@overload +def frombuffer[ScalarT: np.generic]( + buffer: Buffer, + dtype: _DTypeLike[ScalarT], + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def frombuffer( + buffer: Buffer, + dtype: DTypeLike | None = float, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[Incomplete]: ... + +# keep roughly in sync with `_core.numeric.fromfunction` +def fromfunction[ShapeT: _Shape, DTypeT: np.dtype]( + function: Callable[..., np.ndarray[ShapeT, DTypeT]], + shape: Sequence[int], + *, + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, +) -> MaskedArray[ShapeT, DTypeT]: ... + +# keep roughly in sync with `_core.numeric.identity` +@overload +def identity( + n: int, + dtype: None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[np.float64]]: ... +@overload +def identity[ScalarT: np.generic]( + n: int, + dtype: _DTypeLike[ScalarT], + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[ScalarT]]: ... +@overload +def identity( + n: int, + dtype: DTypeLike | None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[Incomplete]]: ... + +# keep roughly in sync with `_core.numeric.indices` +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.intp]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... 
+@overload +def indices[ScalarT: np.generic]( + dimensions: Sequence[int], + dtype: _DTypeLike[ScalarT], + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[ScalarT]: ... +@overload +def indices[ScalarT: np.generic]( + dimensions: Sequence[int], + dtype: _DTypeLike[ScalarT], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[ScalarT], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None, + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... + +# keep roughly in sync with `_core.fromnumeric.squeeze` +@overload +def squeeze[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[ScalarT]: ... +@overload +def squeeze( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 094c1e26b191..769c38fdc900 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -19,12 +19,12 @@ 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', ] +import functools import itertools import warnings import numpy as np -from numpy import array as nxarray -from numpy import ndarray +from numpy import array as nxarray, ndarray from numpy.lib._function_base_impl import _ureduce from numpy.lib._index_tricks_impl import AxisConcatenator from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple @@ -245,151 +245,93 @@ def masked_all_like(arr): #####-------------------------------------------------------------------------- #---- --- Standard functions --- #####-------------------------------------------------------------------------- -class _fromnxfunction: - """ - Defines a wrapper to adapt NumPy functions to masked arrays. - - An instance of `_fromnxfunction` can be called with the same parameters - as the wrapped NumPy function. The docstring of `newfunc` is adapted from - the wrapped function as well, see `getdoc`. - - This class should not be used directly. Instead, one of its extensions that - provides support for a specific type of input should be used. +def _fromnxfunction_function(_fromnxfunction): + """ + Decorator to wrap a "_fromnxfunction" function, wrapping a numpy function as a + masked array function, with proper docstring and name. Parameters ---------- - funcname : str - The name of the function to be adapted. The function should be - in the NumPy namespace (i.e. ``np.funcname``). 
+    _fromnxfunction : (npfunc: ({params}) -> ndarray, {params}) -> masked_array
+        Wrapper function that calls the wrapped numpy function.
-    """
+    Returns
+    -------
+    decorator : (f: ({params}) -> ndarray) -> ({params}) -> masked_array
+        Function that accepts a numpy function and returns a masked array function.
-    def __init__(self, funcname):
-        self.__name__ = funcname
-        self.__qualname__ = funcname
-        self.__doc__ = self.getdoc()
+    """
+    def decorator(npfunc, /):
+        def wrapper(*args, **kwargs):
+            return _fromnxfunction(npfunc, *args, **kwargs)
-    def getdoc(self):
-        """
-        Retrieve the docstring and signature from the function.
+        functools.update_wrapper(wrapper, npfunc, assigned=("__name__", "__qualname__"))
+        wrapper.__doc__ = ma.doc_note(
+            npfunc.__doc__,
+            "The function is applied to both the ``_data`` and the ``_mask``, if any.",
+        )
+        return wrapper
-        The ``__doc__`` attribute of the function is used as the docstring for
-        the new masked array version of the function. A note on application
-        of the function to the mask is appended.
+    return decorator
-        Parameters
-        ----------
-        None
-        """
-        npfunc = getattr(np, self.__name__, None)
-        doc = getattr(npfunc, '__doc__', None)
-        if doc:
-            sig = ma.get_object_signature(npfunc)
-            doc = ma.doc_note(doc, "The function is applied to both the _data "
-                                   "and the _mask, if any.")
-            if sig:
-                sig = self.__name__ + sig + "\n\n"
-            return sig + doc
-        return
 
+
+@_fromnxfunction_function
+def _fromnxfunction_single(npfunc, a, /, *args, **kwargs):
+    """
+    Wraps a NumPy function that can be called with a single array argument followed by
+    auxiliary args that are passed verbatim for both the data and mask calls.
+    """
+    return masked_array(
+        data=npfunc(np.asarray(a), *args, **kwargs),
+        mask=npfunc(getmaskarray(a), *args, **kwargs),
+    )
-    def __call__(self, *args, **params):
-        pass
+
+
+@_fromnxfunction_function
+def _fromnxfunction_seq(npfunc, arys, /, *args, **kwargs):
+    """
+    Wraps a NumPy function that can be called with a single sequence of arrays followed
+    by auxiliary args that are passed verbatim for both the data and mask calls.
+    """
+    return masked_array(
+        data=npfunc(tuple(np.asarray(a) for a in arys), *args, **kwargs),
+        mask=npfunc(tuple(getmaskarray(a) for a in arys), *args, **kwargs),
+    )
-class _fromnxfunction_single(_fromnxfunction):
+
+
+@_fromnxfunction_function
+def _fromnxfunction_allargs(npfunc, /, *arys, **kwargs):
     """
-    A version of `_fromnxfunction` that is called with a single array
-    argument followed by auxiliary args that are passed verbatim for
-    both the data and mask calls.
+    Wraps a NumPy function that can be called with multiple array arguments.
+    All args are converted to arrays even if they are not so already.
+    This makes it possible to process scalars as 1-D arrays.
+    Only keyword arguments are passed through verbatim for the data and mask calls.
+    Array arguments are processed independently and the results are returned in a list.
+    If only one arg is present, the return value is just the processed array instead of
+    a list.
""" - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - if isinstance(x, ndarray): - _d = func(x.__array__(), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - else: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_seq(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with a single sequence - of arrays followed by auxiliary args that are passed verbatim for - both the data and mask calls. - """ - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - _d = func(tuple(np.asarray(a) for a in x), *args, **params) - _m = func(tuple(getmaskarray(a) for a in x), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_args(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. The first non-array-like input marks the beginning of the - arguments that are passed verbatim for both the data and mask calls. - Array arguments are processed independently and the results are - returned in a list. If only one array is found, the return value is - just the processed array instead of a list. - """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - arrays = [] - args = list(args) - while len(args) > 0 and issequence(args[0]): - arrays.append(args.pop(0)) - res = [] - for x in arrays: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - res.append(masked_array(_d, mask=_m)) - if len(arrays) == 1: - return res[0] - return res - - -class _fromnxfunction_allargs(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. Similar to `_fromnxfunction_args` except that all args - are converted to arrays even if they are not so already. This makes - it possible to process scalars as 1-D arrays. Only keyword arguments - are passed through verbatim for the data and mask calls. Arrays - arguments are processed independently and the results are returned - in a list. If only one arg is present, the return value is just the - processed array instead of a list. 
- """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - res = [] - for x in args: - _d = func(np.asarray(x), **params) - _m = func(getmaskarray(x), **params) - res.append(masked_array(_d, mask=_m)) - if len(args) == 1: - return res[0] - return res - - -atleast_1d = _fromnxfunction_allargs('atleast_1d') -atleast_2d = _fromnxfunction_allargs('atleast_2d') -atleast_3d = _fromnxfunction_allargs('atleast_3d') - -vstack = row_stack = _fromnxfunction_seq('vstack') -hstack = _fromnxfunction_seq('hstack') -column_stack = _fromnxfunction_seq('column_stack') -dstack = _fromnxfunction_seq('dstack') -stack = _fromnxfunction_seq('stack') - -hsplit = _fromnxfunction_single('hsplit') - -diagflat = _fromnxfunction_single('diagflat') + out = tuple( + masked_array( + data=npfunc(np.asarray(a), **kwargs), + mask=npfunc(getmaskarray(a), **kwargs), + ) + for a in arys + ) + return out[0] if len(out) == 1 else out + + +atleast_1d = _fromnxfunction_allargs(np.atleast_1d) +atleast_2d = _fromnxfunction_allargs(np.atleast_2d) +atleast_3d = _fromnxfunction_allargs(np.atleast_3d) + +vstack = row_stack = _fromnxfunction_seq(np.vstack) +hstack = _fromnxfunction_seq(np.hstack) +column_stack = _fromnxfunction_seq(np.column_stack) +dstack = _fromnxfunction_seq(np.dstack) +stack = _fromnxfunction_seq(np.stack) + +hsplit = _fromnxfunction_single(np.hsplit) +diagflat = _fromnxfunction_single(np.diagflat) #####-------------------------------------------------------------------------- @@ -1136,7 +1078,7 @@ def mask_rowcols(a, axis=None): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1193,7 +1135,7 @@ def mask_rows(a, axis=np._NoValue): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1244,7 +1186,7 @@ def mask_cols(a, axis=np._NoValue): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1447,14 +1389,13 @@ def in1d(ar1, ar2, assume_unique=False, invert=False): Test whether each element of an array is also present in a second array. - The output is always a masked array. See `numpy.in1d` for more details. + The output is always a masked array. We recommend using :func:`isin` instead of `in1d` for new code. See Also -------- isin : Version of this function that preserves the shape of ar1. - numpy.in1d : Equivalent function for ndarrays. Examples -------- @@ -1731,8 +1672,8 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): return result -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, - ddof=np._NoValue): +def corrcoef(x, y=None, rowvar=True, allow_masked=True, + ): """ Return Pearson product-moment correlation coefficients. @@ -1753,32 +1694,17 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 allow_masked : bool, optional If True, masked values are propagated pair-wise: if a value is masked in `x`, the corresponding value is masked in `y`. If False, raises an exception. 
-        Because `bias` is deprecated, this argument needs to be treated as
-        keyword only to avoid a warning.
-    ddof : _NoValue, optional
-        Has no effect, do not use.
-
-        .. deprecated:: 1.10.0
 
     See Also
     --------
     numpy.corrcoef : Equivalent function in top-level NumPy module.
     cov : Estimate the covariance matrix.
 
-    Notes
-    -----
-    This function accepts but discards arguments `bias` and `ddof`. This is
-    for backwards compatibility with previous versions of this function. These
-    arguments had no effect on the return values of the function and can be
-    safely ignored in this and previous versions of numpy.
-
     Examples
     --------
     >>> import numpy as np
@@ -1793,10 +1719,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
            dtype=float64)
 
     """
-    msg = 'bias and ddof have no effect and are deprecated'
-    if bias is not np._NoValue or ddof is not np._NoValue:
-        # 2015-03-15, 1.10
-        warnings.warn(msg, DeprecationWarning, stacklevel=2)
     # Estimate the covariance matrix.
     corr = cov(x, y, rowvar, allow_masked=allow_masked)
     # The non-masked version returns a masked value for a scalar.
diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi
index c3f9fcde4a0a..83f3bee761ce 100644
--- a/numpy/ma/extras.pyi
+++ b/numpy/ma/extras.pyi
@@ -1,6 +1,17 @@
 from _typeshed import Incomplete
+from collections.abc import Sequence
+from typing import SupportsIndex, overload
 
 import numpy as np
+from numpy import _CastingKind
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    _AnyShape,
+    _ArrayLike,
+    _DTypeLike,
+    _ShapeLike,
+)
 from numpy.lib._function_base_impl import average
 from numpy.lib._index_tricks_impl import AxisConcatenator
 
@@ -55,80 +66,234 @@ __all__ = [
     "vstack",
 ]
 
-def count_masked(arr, axis=...): ...
-def masked_all(shape, dtype=...): ...
-def masked_all_like(arr): ...
+type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]]
+
+###
+
+# keep in sync with `numpy._core.shape_base.atleast_1d`
+@overload
+def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ...
+@overload
+def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic](
+    a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], /
+) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ...
+@overload
+def atleast_1d[ScalarT: np.generic](
+    a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT]
+) -> tuple[_MArray[ScalarT], ...]: ...
+@overload
+def atleast_1d(a0: ArrayLike, /) -> _MArray[Incomplete]: ...
+@overload
+def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ...
+@overload
+def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ...
+
+# keep in sync with `numpy._core.shape_base.atleast_2d`
+@overload
+def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ...
+@overload
+def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic](
+    a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], /
+) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ...
+@overload
+def atleast_2d[ScalarT: np.generic](
+    a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT]
+) -> tuple[_MArray[ScalarT], ...]: ...
+@overload
+def atleast_2d(a0: ArrayLike, /) -> _MArray[Incomplete]: ...
+@overload
+def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ...
+@overload
+def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ...
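+
+# Usage sketch for the `atleast_1d`/`atleast_2d` overloads above (and
+# `atleast_3d` below); illustrative only: a single argument yields one masked
+# array, while two or more yield a tuple, which is why the one-argument and
+# multi-argument overloads are kept separate:
+#
+#     >>> atleast_1d(np.float64(1.0))          # 1-D MaskedArray
+#     >>> atleast_1d(np.ones(3), np.ones(3))   # tuple of two MaskedArrays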
+
+# keep in sync with `numpy._core.shape_base.atleast_3d`
+@overload
+def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ...
+@overload
+def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic](
+    a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], /
+) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ...
+@overload
+def atleast_3d[ScalarT: np.generic](
+    a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT]
+) -> tuple[_MArray[ScalarT], ...]: ...
+@overload
+def atleast_3d(a0: ArrayLike, /) -> _MArray[Incomplete]: ...
+@overload
+def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ...
+@overload
+def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ...
+
+# keep in sync with `numpy._core.shape_base.vstack`
+@overload
+def vstack[ScalarT: np.generic](
+    tup: Sequence[_ArrayLike[ScalarT]],
+    *,
+    dtype: None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[ScalarT]: ...
+@overload
+def vstack[ScalarT: np.generic](
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: _DTypeLike[ScalarT],
+    casting: _CastingKind = "same_kind"
+) -> _MArray[ScalarT]: ...
+@overload
+def vstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[Incomplete]: ...
 
-class _fromnxfunction:
-    __name__: Incomplete
-    __doc__: Incomplete
-    def __init__(self, funcname) -> None: ...
-    def getdoc(self): ...
-    def __call__(self, *args, **params): ...
+row_stack = vstack
 
-class _fromnxfunction_single(_fromnxfunction):
-    def __call__(self, x, *args, **params): ...
+# keep in sync with `numpy._core.shape_base.hstack`
+@overload
+def hstack[ScalarT: np.generic](
+    tup: Sequence[_ArrayLike[ScalarT]],
+    *,
+    dtype: None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[ScalarT]: ...
+@overload
+def hstack[ScalarT: np.generic](
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: _DTypeLike[ScalarT],
+    casting: _CastingKind = "same_kind"
+) -> _MArray[ScalarT]: ...
+@overload
+def hstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[Incomplete]: ...
 
-class _fromnxfunction_seq(_fromnxfunction):
-    def __call__(self, x, *args, **params): ...
+# keep in sync with `numpy._core.shape_base_impl.column_stack`
+@overload
+def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ...
+@overload
+def column_stack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ...
 
-class _fromnxfunction_allargs(_fromnxfunction):
-    def __call__(self, *args, **params): ...
+# keep in sync with `numpy._core.shape_base_impl.dstack`
+@overload
+def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ...
+@overload
+def dstack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ...
 
-atleast_1d: _fromnxfunction_allargs
-atleast_2d: _fromnxfunction_allargs
-atleast_3d: _fromnxfunction_allargs
+# keep in sync with `numpy._core.shape_base.stack`
+@overload
+def stack[ScalarT: np.generic](
+    arrays: Sequence[_ArrayLike[ScalarT]],
+    axis: SupportsIndex = 0,
+    out: None = None,
+    *,
+    dtype: None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[ScalarT]: ...
+@overload
+def stack[ScalarT: np.generic](
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = 0,
+    out: None = None,
+    *,
+    dtype: _DTypeLike[ScalarT],
+    casting: _CastingKind = "same_kind"
+) -> _MArray[ScalarT]: ...
+@overload
+def stack(
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = 0,
+    out: None = None,
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[Incomplete]: ...
+@overload
+def stack[MArrayT: MaskedArray](
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex,
+    out: MArrayT,
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind",
+) -> MArrayT: ...
+@overload
+def stack[MArrayT: MaskedArray](
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = 0,
+    *,
+    out: MArrayT,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind",
+) -> MArrayT: ...
 
-vstack: _fromnxfunction_seq
-row_stack: _fromnxfunction_seq
-hstack: _fromnxfunction_seq
-column_stack: _fromnxfunction_seq
-dstack: _fromnxfunction_seq
-stack: _fromnxfunction_seq
+# keep in sync with `numpy._core.shape_base_impl.hsplit`
+@overload
+def hsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[ScalarT]]: ...
+@overload
+def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[_MArray[Incomplete]]: ...
 
-hsplit: _fromnxfunction_single
-diagflat: _fromnxfunction_single
+# keep in sync with `numpy._core.twodim_base_impl.diagflat`
+@overload
+def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MArray[ScalarT]: ...
+@overload
+def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ...
+
+# TODO: everything below
+# mypy: disable-error-code=no-untyped-def
+
+def count_masked(arr, axis=None): ...
+def masked_all(shape, dtype=float): ...  # noqa: PYI014
+def masked_all_like(arr): ...
 def apply_along_axis(func1d, axis, arr, *args, **kwargs): ...
 def apply_over_axes(func, a, axes): ...
-def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ...
-def compress_nd(x, axis=...): ...
-def compress_rowcols(x, axis=...): ...
+def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ...
+def compress_nd(x, axis=None): ...
+def compress_rowcols(x, axis=None): ...
 def compress_rows(a): ...
 def compress_cols(a): ...
 def mask_rows(a, axis=...): ...
 def mask_cols(a, axis=...): ...
-def ediff1d(arr, to_end=..., to_begin=...): ...
-def unique(ar1, return_index=..., return_inverse=...): ...
-def intersect1d(ar1, ar2, assume_unique=...): ...
-def setxor1d(ar1, ar2, assume_unique=...): ...
-def in1d(ar1, ar2, assume_unique=..., invert=...): ...
-def isin(element, test_elements, assume_unique=..., invert=...): ...
+def ediff1d(arr, to_end=None, to_begin=None): ...
+def unique(ar1, return_index=False, return_inverse=False): ...
+def intersect1d(ar1, ar2, assume_unique=False): ...
+def setxor1d(ar1, ar2, assume_unique=False): ...
+def in1d(ar1, ar2, assume_unique=False, invert=False): ...
+def isin(element, test_elements, assume_unique=False, invert=False): ...
 def union1d(ar1, ar2): ...
-def setdiff1d(ar1, ar2, assume_unique=...): ...
-def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
-def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
+def setdiff1d(ar1, ar2, assume_unique=False): ...
+def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ...
+def corrcoef(x, y=None, rowvar=True, allow_masked=True): ...
 
 class MAxisConcatenator(AxisConcatenator):
+    __slots__ = ()
+
     @staticmethod
     def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
     @classmethod
     def makemat(cls, arr: Incomplete) -> Incomplete: ...
# type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] class mr_class(MAxisConcatenator): + __slots__ = () + def __init__(self) -> None: ... mr_: mr_class -def ndenumerate(a, compressed=...): ... +def ndenumerate(a, compressed=True): ... def flatnotmasked_edges(a): ... -def notmasked_edges(a, axis=...): ... +def notmasked_edges(a, axis=None): ... def flatnotmasked_contiguous(a): ... -def notmasked_contiguous(a, axis=...): ... +def notmasked_contiguous(a, axis=None): ... def clump_unmasked(a): ... def clump_masked(a): ... -def vander(x, n=...): ... -def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... +def vander(x, n=None): ... +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... # def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index 835f3ce5b772..bb4a2707fec1 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -657,8 +657,7 @@ def openfile(fname): def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', - varnames=None, vartypes=None, - *, delimitor=np._NoValue): # backwards compatibility + varnames=None, vartypes=None): """ Creates a mrecarray from data stored in the file `filename`. @@ -682,16 +681,6 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', Ultra simple: the varnames are in the header, one line""" - if delimitor is not np._NoValue: - if delimiter is not None: - raise TypeError("fromtextfile() got multiple values for argument " - "'delimiter'") - # NumPy 1.22.0, 2021-09-23 - warnings.warn("The 'delimitor' keyword argument of " - "numpy.ma.mrecords.fromtextfile() is deprecated " - "since NumPy 1.22.0, use 'delimiter' instead.", - DeprecationWarning, stacklevel=2) - delimiter = delimitor # Try to open the file. ftext = openfile(fname) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index cae687aa7d1a..f1319d4bf69d 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,8 +1,10 @@ -from typing import Any, TypeVar +from typing import Any, Generic +from typing_extensions import TypeVar -from numpy import dtype +import numpy as np +from numpy._typing import _AnyShape -from . import MaskedArray +from .core import MaskedArray __all__ = [ "MaskedRecords", @@ -13,10 +15,13 @@ __all__ = [ "addfield", ] -_ShapeT_co = TypeVar("_ShapeT_co", covariant=True, bound=tuple[int, ...]) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co]): +### +# mypy: disable-error-code=no-untyped-def + +class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): def __new__( cls, shape, @@ -48,49 +53,47 @@ class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co]): def __setattr__(self, attr, val): ... def __getitem__(self, indx): ... def __setitem__(self, indx, value): ... - def view(self, dtype=..., type=...): ... + def view(self, dtype=None, type=None): ... # type: ignore[override] def harden_mask(self): ... def soften_mask(self): ... - def copy(self): ... - def tolist(self, fill_value=...): ... + def copy(self): ... # type: ignore[override] + def tolist(self, fill_value=None): ... def __reduce__(self): ... 
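The `mrecords.pyi` hunk above swaps `typing.TypeVar` for `typing_extensions.TypeVar` so that `_ShapeT_co` and `_DTypeT_co` can carry PEP 696 defaults, which is what lets a bare `MaskedRecords` annotation behave like `MaskedRecords[_AnyShape, np.dtype]`. A self-contained sketch of that pattern with stand-in names (`Grid` and `_ShapeT` are hypothetical, not from the patch):

from typing import Generic

from typing_extensions import TypeVar

# default= is the PEP 696 extension; bound= still constrains explicit uses.
_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...], default=tuple[int, ...])


class Grid(Generic[_ShapeT]):
    """A bare `Grid` annotation is read as `Grid[tuple[int, ...]]`."""


g: Grid = Grid()  # the type checker fills in the defaulted parameter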
 mrecarray = MaskedRecords
 
 def fromarrays(
     arraylist,
-    dtype=...,
-    shape=...,
-    formats=...,
-    names=...,
-    titles=...,
-    aligned=...,
-    byteorder=...,
-    fill_value=...,
+    dtype=None,
+    shape=None,
+    formats=None,
+    names=None,
+    titles=None,
+    aligned=False,
+    byteorder=None,
+    fill_value=None,
 ): ...
 def fromrecords(
     reclist,
-    dtype=...,
-    shape=...,
-    formats=...,
-    names=...,
-    titles=...,
-    aligned=...,
-    byteorder=...,
-    fill_value=...,
+    dtype=None,
+    shape=None,
+    formats=None,
+    names=None,
+    titles=None,
+    aligned=False,
+    byteorder=None,
+    fill_value=None,
     mask=...,
 ): ...
 def fromtextfile(
     fname,
-    delimiter=...,
-    commentchar=...,
-    missingchar=...,
-    varnames=...,
-    vartypes=...,
-    # NOTE: deprecated: NumPy 1.22.0, 2021-09-23
-    # delimitor=...,
+    delimiter=None,
+    commentchar="#",
+    missingchar="",
+    varnames=None,
+    vartypes=None,
 ): ...
-def addfield(mrecord, newfield, newfieldname=...): ...
+def addfield(mrecord, newfield, newfieldname=None): ...
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 091ba6c99fff..a082f8aa7450 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -6,6 +6,7 @@ __author__ = "Pierre GF Gerard-Marchant"
 
 import copy
+import inspect
 import itertools
 import operator
 import pickle
@@ -139,33 +140,24 @@
     assert_not_equal,
     fail_if_equal,
 )
-from numpy.testing import (
-    IS_WASM,
-    assert_raises,
-    assert_warns,
-    suppress_warnings,
-    temppath,
-)
+from numpy.testing import IS_WASM, assert_raises, temppath
 from numpy.testing._private.utils import requires_memory
 
 pi = np.pi
 
-suppress_copy_mask_on_assignment = suppress_warnings()
-suppress_copy_mask_on_assignment.filter(
-    numpy.ma.core.MaskedArrayFutureWarning,
-    "setting an item on a masked array which has a shared mask will not copy")
-
-
 # For parametrized numeric testing
 num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD']
 num_ids = [dt_.char for dt_ in num_dts]
-
+
+# message for warning filters
+WARNING_MESSAGE = ("setting an item on a masked array which has a shared "
+                   "mask will not copy")
+WARNING_MARK_SPEC = f"ignore:.*{WARNING_MESSAGE}:numpy.ma.core.MaskedArrayFutureWarning"
 
 
 class TestMaskedArray:
     # Base test class for MaskedArrays.
-    def setup_method(self):
+    def _create_data(self):
         # Base data definition.
         x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
         y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
@@ -178,7 +170,7 @@ def setup_method(self):
         zm = masked_array(z, mask=[0, 1, 0, 0])
         xf = np.where(m1, 1e+20, x)
         xm.set_fill_value(1e+20)
-        self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
+        return x, y, a10, m1, m2, xm, ym, z, zm, xf
 
     def test_basicattributes(self):
         # Tests some basic array attributes.
@@ -204,7 +196,7 @@ def test_basic0d(self):
 
     def test_basic1d(self):
         # Test of basic array creation and properties in 1 dimension.
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+        x, _, _, m1, _, xm, ym, z, zm, xf = self._create_data()
         assert_(not isMaskedArray(x))
         assert_(isMaskedArray(xm))
         assert_((xm - ym).filled(0).any())
@@ -222,13 +214,13 @@ def test_basic2d(self):
         # Test of basic array creation and properties in 2 dimensions.
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, _, xm, ym, _, _, xf = self._create_data() for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) @@ -242,7 +234,7 @@ def test_basic2d(self): def test_concatenate_basic(self): # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() # basic concatenation assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) assert_equal(np.concatenate((x, y)), concatenate((x, y))) @@ -251,10 +243,15 @@ def test_concatenate_basic(self): def test_concatenate_alongaxis(self): # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, m2, xm, ym, z, _, xf = self._create_data() # Concatenation along an axis s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) + assert_equal(xm.mask, np.reshape(m1, s)) assert_equal(ym.mask, np.reshape(m2, s)) xmym = concatenate((xm, ym), 1) @@ -370,7 +367,7 @@ def test_unknown_keyword_parameter(self): MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. def test_asarray(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm = self._create_data()[5] xm.fill_value = -9999 xm._hardmask = True xmm = asarray(xm) @@ -492,7 +489,7 @@ def test_setitem_no_warning(self): x[...] = value x[[0, 1, 2]] = value - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_copy(self): # Tests of some subtle points of copying and sizing. n = [0, 0, 1, 0, 0] @@ -511,9 +508,9 @@ def test_copy(self): y1a = array(y1) # Default for masked array is not to copy; see gh-10318. 
         assert_(y1a._data.__array_interface__ ==
-            y1._data.__array_interface__)
+                y1._data.__array_interface__)
         assert_(y1a._mask.__array_interface__ ==
-            y1._mask.__array_interface__)
+                y1._mask.__array_interface__)
 
         y2 = array(x1, mask=m3)
         assert_(y2._data.__array_interface__ == x1.__array_interface__)
@@ -612,7 +609,7 @@ def test_format(self):
         # assert_equal(format(masked, " <5"), "--   ")
 
         # Expect a FutureWarning for using format_spec with MaskedElement
-        with assert_warns(FutureWarning):
+        with pytest.warns(FutureWarning):
             with_format_string = format(masked, " >5")
         assert_equal(with_format_string, "--")
@@ -770,8 +767,7 @@ def test_pickling_wstructured(self):
 
     def test_pickling_keepalignment(self):
         # Tests pickling w/ F_CONTIGUOUS arrays
-        a = arange(10)
-        a.shape = (-1, 2)
+        a = arange(10).reshape((-1, 2))
         b = a.T
         for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
             test = pickle.loads(pickle.dumps(b, protocol=proto))
@@ -793,8 +789,9 @@ def test_topython(self):
         assert_equal(1.0, float(array([[1]])))
         assert_raises(TypeError, float, array([1, 1]))
 
-        with suppress_warnings() as sup:
-            sup.filter(UserWarning, 'Warning: converting a masked element')
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                'ignore', 'Warning: converting a masked element', UserWarning)
             assert_(np.isnan(float(array([1], mask=[1]))))
 
         a = array([1, 2, 3], mask=[1, 0, 0])
@@ -847,14 +844,17 @@ def test_oddfeatures_2(self):
         assert_(z[1] is not masked)
         assert_(z[2] is masked)
 
-    @suppress_copy_mask_on_assignment
     def test_oddfeatures_3(self):
-        # Tests some generic features
-        atest = array([10], mask=True)
-        btest = array([20])
-        idx = atest.mask
-        atest[idx] = btest[idx]
-        assert_equal(atest, [20])
+        msg = "setting an item on a masked array which has a shared mask will not copy"
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                'ignore', msg, numpy.ma.core.MaskedArrayFutureWarning)
+            # Tests some generic features
+            atest = array([10], mask=True)
+            btest = array([20])
+            idx = atest.mask
+            atest[idx] = btest[idx]
+            assert_equal(atest, [20])
 
     def test_filled_with_object_dtype(self):
         a = np.ma.masked_all(1, dtype='O')
@@ -1047,6 +1047,7 @@ def test_mvoid_iter(self):
         # w/ mask
         assert_equal(list(a[1]), [masked, 4])
 
+    @pytest.mark.thread_unsafe(reason="masked_print_option.set_display global state")
     def test_mvoid_print(self):
         # Test printing a mvoid
         mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
@@ -1064,6 +1065,7 @@ def test_mvoid_print(self):
         mx = array([(1,), (2,)], dtype=[('a', 'O')])
         assert_equal(str(mx[0]), "(1,)")
 
+    @pytest.mark.thread_unsafe(reason="masked_print_option global state")
     def test_mvoid_multidim_print(self):
         # regression test for gh-6019
 
@@ -1123,8 +1125,7 @@ def test_maskedarray_tofile_raises_notimplementederror(self):
 
 class TestMaskedArrayArithmetic:
     # Base test class for MaskedArrays.
-
-    def setup_method(self):
+    def _create_data(self):
         # Base data definition.
x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) @@ -1137,16 +1138,18 @@ def setup_method(self): zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) - self.err_status = np.geterr() - np.seterr(divide='ignore', invalid='ignore') + return x, y, a10, m1, m2, xm, ym, z, zm, xf - def teardown_method(self): - np.seterr(**self.err_status) + @pytest.fixture(autouse=True, scope="class") + def err_status(self): + err = np.geterr() + np.seterr(divide='ignore', invalid='ignore') + yield err + np.seterr(**err) def test_basic_arithmetic(self): # Test of basic arithmetic. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, a10, _, _, xm, ym, _, _, xf = self._create_data() a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_equal(a2d * a2d, a2d * a2dm) @@ -1180,8 +1183,7 @@ def test_basic_arithmetic(self): assert_equal(np.divide(x, y), divide(xm, ym)) def test_divide_on_different_shapes(self): - x = arange(6, dtype=float) - x.shape = (2, 3) + x = arange(6, dtype=float).reshape((2, 3)) y = arange(3, dtype=float) z = x / y @@ -1257,7 +1259,7 @@ def test_scalar_arithmetic(self): def test_basic_ufuncs(self): # Test various functions such as sin, cos. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, z, zm, _ = self._create_data() assert_equal(np.cos(x), cos(xm)) assert_equal(np.cosh(x), cosh(xm)) assert_equal(np.sin(x), sin(xm)) @@ -1319,7 +1321,7 @@ def test_count_on_python_builtins(self): def test_minmax_func(self): # Tests minimum and maximum. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, _, _, _, _ = self._create_data() # max doesn't work if shaped xr = np.ravel(x) xmr = ravel(xm) @@ -1391,7 +1393,7 @@ def test_minmax_funcs_with_output(self): def test_minmax_methods(self): # Additional tests on max/min - (_, _, _, _, _, xm, _, _, _, _) = self.d + xm = self._create_data()[5] xm.shape = (xm.size,) assert_equal(xm.max(), 10) assert_(xm[0].max() is masked) @@ -1492,7 +1494,7 @@ def minmax_with_mask(arr, mask): def test_addsumprod(self): # Tests add, sum, product. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() assert_equal(np.add.reduce(x), add.reduce(x)) assert_equal(np.add.accumulate(x), add.accumulate(x)) assert_equal(4, sum(array(4), axis=0)) @@ -1613,7 +1615,7 @@ def test_noshink_on_creation(self): def test_mod(self): # Tests mod - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() assert_equal(mod(x, y), mod(xm, ym)) test = mod(ym, xm) assert_equal(test, np.mod(ym, xm)) @@ -1811,7 +1813,7 @@ def test_eq_ne_structured_extra(self): el_by_el = [m1[name] != m2[name] for name in dt.names] assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_eq_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1843,7 +1845,7 @@ def test_eq_for_strings(self, dt, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_ne_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1993,17 +1995,26 @@ def test_comparisons_for_numeric(self, op, dt1, dt2, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('op', [operator.le, operator.lt, operator.ge, operator.gt]) @pytest.mark.parametrize('fill', [None, "N/A"]) - def test_comparisons_strings(self, op, fill): + def test_comparisons_strings(self, dt, op, fill): # See gh-21770, mask propagation is broken for strings (and some other # cases) so we explicitly test strings here. # In principle only == and != may need special handling... - ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill) - ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + if isinstance(fill, str): + fill = np.array(fill, dtype=dt) + + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + @pytest.mark.filterwarnings("ignore:.*Comparison to `None`.*:FutureWarning") def test_eq_with_None(self): # Really, comparisons with None should not be done, but check them # anyway. Note that pep8 will flag these tests. @@ -2011,23 +2022,21 @@ def test_eq_with_None(self): # test will fail (and have to be changed accordingly). 
# With partial mask - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Comparison to `None`") - a = array([None, 1], mask=[0, 1]) - assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 - assert_equal(a.data == None, [True, False]) # noqa: E711 - assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 - # With nomask - a = array([None, 1], mask=False) - assert_equal(a == None, [True, False]) # noqa: E711 - assert_equal(a != None, [False, True]) # noqa: E711 - # With complete mask - a = array([None, 2], mask=True) - assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 - assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 - # Fully masked, even comparison to None should return "masked" - a = masked - assert_equal(a == None, masked) # noqa: E711 + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 + assert_equal(a.data == None, [True, False]) # noqa: E711 + assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) # noqa: E711 + assert_equal(a != None, [False, True]) # noqa: E711 + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 + assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) # noqa: E711 def test_eq_with_scalar(self): a = array(1) @@ -2383,16 +2392,15 @@ def test_fillvalue(self): assert_equal(x.fill_value, 999.) assert_equal(x._fill_value, np.array(999.)) + @pytest.mark.filterwarnings("ignore:.*Numpy has detected.*:FutureWarning") def test_subarray_fillvalue(self): # gh-10483 test multi-field index fill value fields = array([(1, 1, 1)], dtype=[('i', int), ('s', '|S8'), ('f', float)]) - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Numpy has detected") - subfields = fields[['i', 'f']] - assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) - # test comparison does not raise: - subfields[1:] == subfields[:-1] + subfields = fields[['i', 'f']] + assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) + # test comparison does not raise: + subfields[1:] == subfields[:-1] def test_fillvalue_exotic_dtype(self): # Tests yet more exotic flexible dtypes @@ -2585,16 +2593,17 @@ def test_fillvalue_bytes_or_str(self): class TestUfuncs: # Test class for the application of ufuncs on MaskedArrays. - - def setup_method(self): + def _create_data(self): # Base data definition. - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) - self.err_status = np.geterr() - np.seterr(divide='ignore', invalid='ignore') - def teardown_method(self): - np.seterr(**self.err_status) + @pytest.fixture(autouse=True, scope="class") + def err_status(self): + err = np.geterr() + np.seterr(divide='ignore', invalid='ignore') + yield err + np.seterr(**err) def test_testUfuncRegression(self): # Tests new ufuncs on MaskedArrays. 
@@ -2620,7 +2629,7 @@ def test_testUfuncRegression(self): except AttributeError: uf = getattr(fromnumeric, f) mf = getattr(numpy.ma.core, f) - args = self.d[:uf.nin] + args = self._create_data()[:uf.nin] ur = uf(*args) mr = mf(*args) assert_equal(ur.filled(0), mr.filled(0), f) @@ -2628,7 +2637,7 @@ def test_testUfuncRegression(self): def test_reduce(self): # Tests reduce on MaskedArrays. - a = self.d[0] + a = self._create_data()[0] assert_(not alltrue(a, axis=0)) assert_(sometrue(a, axis=0)) assert_equal(sum(a[:3], axis=0), 0) @@ -2732,34 +2741,41 @@ def test_masked_array_underflow(self): X2 = X / 2.0 np.testing.assert_array_equal(X2, x / 2) + class TestMaskedArrayInPlaceArithmetic: # Test MaskedArray Arithmetic - - def setup_method(self): + def _create_intdata(self): x = arange(10) y = arange(10) xm = arange(10) xm[2] = masked - self.intdata = (x, y, xm) - self.floatdata = (x.astype(float), y.astype(float), xm.astype(float)) - self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - self.othertypes = [np.dtype(_).type for _ in self.othertypes] - self.uint8data = ( + return x, y, xm + + def _create_floatdata(self): + x, y, xm = self._create_intdata() + return x.astype(float), y.astype(float), xm.astype(float) + + def _create_otherdata(self): + o = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + othertypes = [np.dtype(_).type for _ in o] + x, y, xm = self._create_intdata() + uint8data = ( x.astype(np.uint8), y.astype(np.uint8), xm.astype(np.uint8) ) + return othertypes, uint8data def test_inplace_addition_scalar(self): # Test of inplace additions - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() xm[2] = masked x += 1 assert_equal(x, y + 1) xm += 1 assert_equal(xm, y + 1) - (x, _, xm) = self.floatdata + x, _, xm = self._create_floatdata() id1 = x.data.ctypes.data x += 1. 
assert_(id1 == x.data.ctypes.data) @@ -2767,7 +2783,7 @@ def test_inplace_addition_scalar(self): def test_inplace_addition_array(self): # Test of inplace additions - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() m = xm.mask a = arange(10, dtype=np.int16) a[-1] = masked @@ -2779,7 +2795,7 @@ def test_inplace_addition_array(self): def test_inplace_subtraction_scalar(self): # Test of inplace subtractions - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() x -= 1 assert_equal(x, y - 1) xm -= 1 @@ -2787,7 +2803,7 @@ def test_inplace_subtraction_scalar(self): def test_inplace_subtraction_array(self): # Test of inplace subtractions - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() m = xm.mask a = arange(10, dtype=float) a[-1] = masked @@ -2799,7 +2815,7 @@ def test_inplace_subtraction_array(self): def test_inplace_multiplication_scalar(self): # Test of inplace multiplication - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() x *= 2.0 assert_equal(x, y * 2) xm *= 2.0 @@ -2807,7 +2823,7 @@ def test_inplace_multiplication_scalar(self): def test_inplace_multiplication_array(self): # Test of inplace multiplication - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() m = xm.mask a = arange(10, dtype=float) a[-1] = masked @@ -2819,7 +2835,7 @@ def test_inplace_multiplication_array(self): def test_inplace_division_scalar_int(self): # Test of inplace division - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() x = arange(10) * 2 xm = arange(10) * 2 xm[2] = masked @@ -2830,7 +2846,7 @@ def test_inplace_division_scalar_int(self): def test_inplace_division_scalar_float(self): # Test of inplace division - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() x /= 2.0 assert_equal(x, y / 2.0) xm /= arange(10) @@ -2838,7 +2854,7 @@ def test_inplace_division_scalar_float(self): def test_inplace_division_array_float(self): # Test of inplace division - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() m = xm.mask a = arange(10, dtype=float) a[-1] = masked @@ -3015,10 +3031,11 @@ def test_datafriendly_mul_arrays(self): def test_inplace_addition_scalar_type(self): # Test of inplace additions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) xm[2] = masked x += t(1) assert_equal(x, y + t(1)) @@ -3027,10 +3044,11 @@ def test_inplace_addition_scalar_type(self): def test_inplace_addition_array_type(self): # Test of inplace additions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3042,10 +3060,11 @@ def test_inplace_addition_array_type(self): def test_inplace_subtraction_scalar_type(self): # Test of inplace subtractions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) x -= t(1) assert_equal(x, y - t(1)) xm -= t(1) @@ -3053,10 +3072,11 @@ def test_inplace_subtraction_scalar_type(self): def 
test_inplace_subtraction_array_type(self): # Test of inplace subtractions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3068,10 +3088,11 @@ def test_inplace_subtraction_array_type(self): def test_inplace_multiplication_scalar_type(self): # Test of inplace multiplication - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) x *= t(2) assert_equal(x, y * t(2)) xm *= t(2) @@ -3079,10 +3100,11 @@ def test_inplace_multiplication_scalar_type(self): def test_inplace_multiplication_array_type(self): # Test of inplace multiplication - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3095,11 +3117,12 @@ def test_inplace_multiplication_array_type(self): def test_inplace_floor_division_scalar_type(self): # Test of inplace division # Check for TypeError in case of unsupported types + othertypes, uint8data = self._create_otherdata() unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} - for t in self.othertypes: + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked @@ -3115,11 +3138,12 @@ def test_inplace_floor_division_scalar_type(self): def test_inplace_floor_division_array_type(self): # Test of inplace division # Check for TypeError in case of unsupported types + othertypes, uint8data = self._create_otherdata() unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} - for t in self.othertypes: + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3138,14 +3162,15 @@ def test_inplace_floor_division_array_type(self): def test_inplace_division_scalar_type(self): # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) - - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + othertypes, uint8data = self._create_otherdata() + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in othertypes: + x, y, xm = (_.astype(t) for _ in uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked + nwarns = 0 # May get a DeprecationWarning or a TypeError. 
# @@ -3159,28 +3184,30 @@ def test_inplace_division_scalar_type(self): try: x /= t(2) assert_equal(x, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 try: xm /= t(2) assert_equal(xm, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, f'Failed on type={t}.') + assert_equal(nwarns, 2, f'Failed on type={t}.') else: - assert_equal(len(sup.log), 0, f'Failed on type={t}.') + assert_equal(nwarns, 0, f'Failed on type={t}.') def test_inplace_division_array_type(self): # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + othertypes, uint8data = self._create_otherdata() + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in othertypes: + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked + nwarns = 0 # May get a DeprecationWarning or a TypeError. # @@ -3194,8 +3221,8 @@ def test_inplace_division_array_type(self): try: x /= a assert_equal(x, y / a) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 try: xm /= a assert_equal(xm, y / a) @@ -3203,17 +3230,18 @@ def test_inplace_division_array_type(self): xm.mask, mask_or(mask_or(m, a.mask), (a == t(0))) ) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, f'Failed on type={t}.') + assert_equal(nwarns, 2, f'Failed on type={t}.') else: - assert_equal(len(sup.log), 0, f'Failed on type={t}.') + assert_equal(nwarns, 0, f'Failed on type={t}.') def test_inplace_pow_type(self): # Test keeping data w/ (inplace) power - for t in self.othertypes: + othertypes = self._create_otherdata()[0] + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") # Test pow on scalar @@ -3230,7 +3258,7 @@ def test_inplace_pow_type(self): class TestMaskedArrayMethods: # Test class for miscellaneous MaskedArrays methods. - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -3260,7 +3288,7 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_generic_methods(self): # Tests some MaskedArray methods. @@ -3357,7 +3385,7 @@ def test_allany_oddities(self): def test_argmax_argmin(self): # Tests argmin & argmax on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, _, _, _, mx, mX, _, m2x, m2X, _ = self._create_data() assert_equal(mx.argmin(), 35) assert_equal(mX.argmin(), 35) @@ -3508,7 +3536,7 @@ def test_ones(self): b = a.view() assert_(np.may_share_memory(a.mask, b.mask)) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_put(self): # Tests put. 
d = arange(5) @@ -4009,8 +4037,7 @@ def test_diagonal_view(self): class TestMaskedArrayMathMethods: - - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -4040,11 +4067,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_cumsumprod(self): # Tests cumsum & cumprod on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mX = self._create_data()[5] mXcp = mX.cumsum(0) assert_equal(mXcp._data, mX.filled(0).cumsum(0)) mXcp = mX.cumsum(1) @@ -4078,7 +4105,7 @@ def test_cumsumprod_with_output(self): def test_ptp(self): # Tests ptp on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, m, mx, mX, _, _, _, _ = self._create_data() (n, m) = X.shape assert_equal(mx.ptp(), np.ptp(mx.compressed())) rows = np.zeros(n, float) @@ -4142,7 +4169,7 @@ def test_anom(self): def test_trace(self): # Tests trace on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, _, _, mX, _, _, _, _ = self._create_data() mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_almost_equal(mX.trace(), @@ -4157,7 +4184,7 @@ def test_trace(self): def test_dot(self): # Tests dot on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, _, _, _, mx, mX, mXX, _, _, _ = self._create_data() fx = mx.filled(0) r = mx.dot(mx) assert_almost_equal(r.filled(0), fx.dot(fx)) @@ -4206,7 +4233,7 @@ def test_varmean_nomask(self): def test_varstd(self): # Tests var & std on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_almost_equal(mX.std(axis=None, ddof=1), @@ -4226,7 +4253,7 @@ def test_varstd(self): assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_varstd_specialcases(self): # Test a special case for var nout = np.array(-1, dtype=float) @@ -4307,6 +4334,7 @@ def test_axis_methods_nomask(self): assert_equal(a.max(-1), [3, 6]) assert_equal(a.max(1), [3, 6]) + @pytest.mark.thread_unsafe(reason="crashes with low memory") @requires_memory(free_bytes=2 * 10000 * 1000 * 2) def test_mean_overflow(self): # Test overflow in masked arrays @@ -4357,7 +4385,7 @@ def test_diff_with_n_0(self): class TestMaskedArrayMathMethodsComplex: # Test class for miscellaneous MaskedArrays methods. - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -4387,11 +4415,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_varstd(self): # Tests var & std on MaskedArrays. 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) @@ -4410,17 +4438,6 @@ def test_varstd(self): class TestMaskedArrayFunctions: # Test class for miscellaneous functions. - - def setup_method(self): - x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - xm.set_fill_value(1e+20) - self.info = (xm, ym) - def test_masked_where_bool(self): x = [1, 2] y = masked_where(False, x) @@ -5125,8 +5142,7 @@ def test_convolve(self): class TestMaskedFields: - - def setup_method(self): + def _create_data(self): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = ['one', 'two', 'three', 'four', 'five'] @@ -5134,11 +5150,12 @@ def setup_method(self): mdtype = [('a', bool), ('b', bool), ('c', bool)] mask = [0, 1, 0, 0, 1] base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) - self.data = {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} + return {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} def test_set_records_masks(self): - base = self.data['base'] - mdtype = self.data['mdtype'] + data = self._create_data() + base = data['base'] + mdtype = data['mdtype'] # Set w/ nomask or masked base.mask = nomask assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) @@ -5157,7 +5174,7 @@ def test_set_records_masks(self): def test_set_record_element(self): # Check setting an element of a record) - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[0] = (pi, pi, 'pi') @@ -5172,7 +5189,7 @@ def test_set_record_element(self): [b'pi', b'two', b'three', b'four', b'five']) def test_set_record_slice(self): - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[:3] = (pi, pi, 'pi') @@ -5188,7 +5205,7 @@ def test_set_record_slice(self): def test_mask_element(self): "Check record access" - base = self.data['base'] + base = self._create_data()['base'] base[0] = masked for n in ('a', 'b', 'c'): @@ -5281,9 +5298,10 @@ def test_setitem_scalar(self): assert_array_equal(arr.mask, [True, False, False]) def test_element_len(self): + data = self._create_data() # check that len() works for mvoid (Github issue #576) - for rec in self.data['base']: - assert_equal(len(rec), len(self.data['ddtype'])) + for rec in data['base']: + assert_equal(len(rec), len(data['ddtype'])) class TestMaskedObjectArray: @@ -5335,31 +5353,30 @@ def test_nested_ma(self): class TestMaskedView: - - def setup_method(self): + def _create_data(self): iterator = list(zip(np.arange(10), np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) - self.data = (data, a, controlmask) + return data, a, controlmask def test_view_to_nothing(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view() assert_(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) def test_view_to_type(self): - 
(data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view(np.ndarray) assert_(not isinstance(test, MaskedArray)) assert_equal(test, a._data) assert_equal_records(test, data.view(a.dtype).squeeze()) def test_view_to_simple_dtype(self): - (data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view(float) assert_(isinstance(test, MaskedArray)) @@ -5367,7 +5384,7 @@ def test_view_to_simple_dtype(self): assert_equal(test.mask, controlmask) def test_view_to_flexible_dtype(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view([('A', float), ('B', float)]) assert_equal(test.mask.dtype.names, ('A', 'B')) @@ -5387,7 +5404,7 @@ def test_view_to_flexible_dtype(self): assert_equal(test['B'], a['b'][-1]) def test_view_to_subdtype(self): - (data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view((float, 2)) assert_(isinstance(test, MaskedArray)) @@ -5404,7 +5421,7 @@ def test_view_to_subdtype(self): assert_equal(test, data[-1]) def test_view_to_dtype_and_type(self): - (data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view((float, 2), np.recarray) assert_equal(test, data) @@ -5582,7 +5599,7 @@ def test_coercion_int(self): def test_coercion_float(self): a_f = np.zeros((), float) - assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + pytest.warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) assert_(np.isnan(a_f[()])) @pytest.mark.xfail(reason="See gh-9750") @@ -5637,6 +5654,7 @@ def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) assert_equal(np.argwhere(a), [[1], [3]]) + def test_masked_array_no_copy(): # check nomask array is updated in place a = np.ma.array([1, 2, 3, 4]) @@ -5651,6 +5669,7 @@ def test_masked_array_no_copy(): _ = np.ma.masked_invalid(a, copy=False) assert_array_equal(a.mask, [True, False, False, False, False]) + def test_append_masked_array(): a = np.ma.masked_equal([1, 2, 3], value=2) b = np.ma.masked_equal([4, 3, 2], value=2) @@ -5689,11 +5708,71 @@ def test_append_masked_array_along_axis(): assert_array_equal(result.data, expected.data) assert_array_equal(result.mask, expected.mask) + def test_default_fill_value_complex(): # regression test for Python 3, where 'unicode' was not defined assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) +def test_string_dtype_fill_value_on_construction(): + # Regression test for gh-29421: allow string fill_value on StringDType masked arrays + dt = np.dtypes.StringDType() + data = np.array(["A", "test", "variable", ""], dtype=dt) + mask = [True, False, True, True] + # Prior to the fix, this would TypeError; now it should succeed + arr = np.ma.MaskedArray(data, mask=mask, fill_value="FILL", dtype=dt) + assert isinstance(arr.fill_value, str) + assert arr.fill_value == "FILL" + filled = arr.filled() + # Masked positions should be replaced by 'FILL' + assert filled.tolist() == ["FILL", "test", "FILL", "FILL"] + + +def test_string_dtype_default_fill_value(): + # Regression test for gh-29421: default fill_value for StringDType is 'N/A' + dt = np.dtypes.StringDType() + data = np.array(['x', 'y', 'z'], dtype=dt) + # no fill_value passed → uses default_fill_value internally + arr = np.ma.MaskedArray(data, mask=[True, False, True], dtype=dt) + # ensure it’s stored as a Python str and equals the expected default + assert isinstance(arr.fill_value, str) + assert arr.fill_value == 'N/A' + # masked slots should be replaced by that 
default + assert arr.filled().tolist() == ['N/A', 'y', 'N/A'] + + +def test_string_dtype_fill_value_persists_through_slice(): + # Regression test for gh-29421: .fill_value survives slicing/viewing + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ['a', 'b', 'c'], + mask=[True, False, True], + dtype=dt + ) + arr.fill_value = 'Z' + # slice triggers __array_finalize__ + sub = arr[1:] + # the slice should carry the same fill_value and behavior + assert isinstance(sub.fill_value, str) + assert sub.fill_value == 'Z' + assert sub.filled().tolist() == ['b', 'Z'] + + +def test_setting_fill_value_attribute(): + # Regression test for gh-29421: setting .fill_value post-construction works too + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ["x", "longstring", "mid"], mask=[False, True, False], dtype=dt + ) + # Setting the attribute should not raise + arr.fill_value = "Z" + assert arr.fill_value == "Z" + # And filled() should use the new fill_value + assert arr.filled()[0] == "x" + assert arr.filled()[1] == "Z" + assert arr.filled()[2] == "mid" + + def test_ufunc_with_output(): # check that giving an output argument always returns that output. # Regression test for gh-8416. @@ -5799,6 +5878,7 @@ def test_mask_shape_assignment_does_not_break_masked(): b.shape = (1,) assert_equal(a.mask.shape, ()) + @pytest.mark.skipif(sys.flags.optimize > 1, reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") # noqa: E501 def test_doc_note(): @@ -5884,3 +5964,45 @@ def test_uint_fill_value_and_filled(): # And this ensures things like filled work: np.testing.assert_array_equal( a.filled(), np.array([999999, 1]).astype("uint16"), strict=True) + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + (np.ma.nonzero, "(a)"), + (np.ma.anomalies, "(a, axis=None, dtype=None)"), + (np.ma.cumsum, "(a, axis=None, dtype=None, out=None)"), + (np.ma.compress, "(condition, a, axis=None, out=None)"), + ] +) +def test_frommethod_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + ( + np.ma.empty, + ( + "(shape, dtype=None, order='C', *, device=None, like=None, " + "fill_value=None, hardmask=False)" + ), + ), + ( + np.ma.empty_like, + ( + "(prototype, /, dtype=None, order='K', subok=True, shape=None, *, " + "device=None)" + ), + ), + (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), + ( + np.ma.identity, + "(n, dtype=None, *, like=None, fill_value=None, hardmask=False)", + ), + ] +) +def test_convert2ma_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + assert fn.__module__ == 'numpy.ma.core' diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index 8cc8b9c72bb9..07120b198bea 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -1,15 +1,11 @@ """Test deprecation and future warnings. 
""" -import io -import textwrap - import pytest import numpy as np from numpy.ma.core import MaskedArrayFutureWarning from numpy.ma.testutils import assert_equal -from numpy.testing import assert_warns class TestArgsort: @@ -23,7 +19,7 @@ def _test_base(self, argsort, cls): # argsort has a bad default for >1d arrays arr_2d = np.array([[1, 2], [3, 4]]).view(cls) - result = assert_warns( + result = pytest.warns( np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) assert_equal(result, argsort(arr_2d, axis=None)) @@ -53,10 +49,10 @@ def test_axis_default(self): ma_max = np.ma.maximum.reduce # check that the default axis is still None, but warns on 2d arrays - result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_max, data2d) assert_equal(result, ma_max(data2d, axis=None)) - result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_min, data2d) assert_equal(result, ma_min(data2d, axis=None)) # no warnings on 1d, as both new and old defaults are equivalent @@ -67,21 +63,3 @@ def test_axis_default(self): result = ma_max(data1d) assert_equal(result, ma_max(data1d, axis=None)) assert_equal(result, ma_max(data1d, axis=0)) - - -class TestFromtextfile: - def test_fromtextfile_delimitor(self): - # NumPy 1.22.0, 2021-09-23 - - textfile = io.StringIO(textwrap.dedent( - """ - A,B,C,D - 'string 1';1;1.0;'mixed column' - 'string 2';2;2.0; - 'string 3';3;3.0;123 - 'string 4';4;4.0;3.14 - """ - )) - - with pytest.warns(DeprecationWarning): - result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 3d10e839cbc9..1993ffe3e90d 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -5,8 +5,8 @@ :contact: pierregm_at_uga_dot_edu """ +import inspect import itertools -import warnings import pytest @@ -68,7 +68,6 @@ assert_array_equal, assert_equal, ) -from numpy.testing import assert_warns, suppress_warnings class TestGeneric: @@ -746,7 +745,7 @@ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): res = func(x, axis=axis) assert_equal(res, mask_rowcols(x, rowcols_axis)) @@ -1078,7 +1077,7 @@ def test_3d(self): x = np.ma.arange(24).reshape(3, 4, 2) x[x % 3 == 0] = masked assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) - x.shape = (4, 3, 2) + x = x.reshape((4, 3, 2)) assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) x = np.ma.arange(24).reshape(4, 3, 2) x[x % 5 == 0] = masked @@ -1287,19 +1286,14 @@ def test_special(self): def test_empty(self): # empty arrays a = np.ma.masked_array(np.array([], dtype=float)) - with suppress_warnings() as w: - w.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # multiple dimensions a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) # no axis - with suppress_warnings() as w: - w.record(RuntimeWarning) - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # axis 0 and 1 b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) @@ -1308,10 +1302,8 @@ def test_empty(self): # axis 2 b = 
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 3d10e839cbc9..1993ffe3e90d 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -5,8 +5,8 @@
 :contact: pierregm_at_uga_dot_edu

 """
+import inspect
 import itertools
-import warnings

 import pytest
@@ -68,7 +68,6 @@
     assert_array_equal,
     assert_equal,
 )
-from numpy.testing import assert_warns, suppress_warnings


 class TestGeneric:
@@ -746,7 +745,7 @@ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis):
         x = array(np.arange(9).reshape(3, 3),
                   mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])

-        with assert_warns(DeprecationWarning):
+        with pytest.warns(DeprecationWarning):
             res = func(x, axis=axis)
             assert_equal(res, mask_rowcols(x, rowcols_axis))
@@ -1078,7 +1077,7 @@ def test_3d(self):
         x = np.ma.arange(24).reshape(3, 4, 2)
         x[x % 3 == 0] = masked
         assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]])
-        x.shape = (4, 3, 2)
+        x = x.reshape((4, 3, 2))
         assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]])
         x = np.ma.arange(24).reshape(4, 3, 2)
         x[x % 5 == 0] = masked
@@ -1287,19 +1286,14 @@ def test_special(self):
     def test_empty(self):
         # empty arrays
         a = np.ma.masked_array(np.array([], dtype=float))
-        with suppress_warnings() as w:
-            w.record(RuntimeWarning)
+        with pytest.warns(RuntimeWarning):
             assert_array_equal(np.ma.median(a), np.nan)
-            assert_(w.log[0].category is RuntimeWarning)

         # multiple dimensions
         a = np.ma.masked_array(np.array([], dtype=float, ndmin=3))
         # no axis
-        with suppress_warnings() as w:
-            w.record(RuntimeWarning)
-            warnings.filterwarnings('always', '', RuntimeWarning)
+        with pytest.warns(RuntimeWarning):
             assert_array_equal(np.ma.median(a), np.nan)
-            assert_(w.log[0].category is RuntimeWarning)

         # axis 0 and 1
         b = np.ma.masked_array(np.array([], dtype=float, ndmin=2))
@@ -1308,10 +1302,8 @@ def test_empty(self):

         # axis 2
         b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2))
-        with warnings.catch_warnings(record=True) as w:
-            warnings.filterwarnings('always', '', RuntimeWarning)
+        with pytest.warns(RuntimeWarning):
             assert_equal(np.ma.median(a, axis=2), b)
-            assert_(w[0].category is RuntimeWarning)

     def test_object(self):
         o = np.ma.masked_array(np.arange(7.))
@@ -1322,11 +1314,11 @@


 class TestCov:
-    def setup_method(self):
-        self.data = array(np.random.rand(12))
+    def _create_data(self):
+        return array(np.random.rand(12))

     def test_covhelper(self):
-        x = self.data
+        x = self._create_data()
         # Test not mask output type is a float.
         assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32)
         assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32)
@@ -1347,7 +1339,7 @@ def test_covhelper(self):

     def test_1d_without_missing(self):
         # Test cov on 1D variable w/o missing values
-        x = self.data
+        x = self._create_data()
         assert_almost_equal(np.cov(x), cov(x))
         assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
         assert_almost_equal(np.cov(x, rowvar=False, bias=True),
@@ -1355,7 +1347,7 @@ def test_1d_without_missing(self):

     def test_2d_without_missing(self):
         # Test cov on 1 2D variable w/o missing values
-        x = self.data.reshape(3, 4)
+        x = self._create_data().reshape(3, 4)
         assert_almost_equal(np.cov(x), cov(x))
         assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
         assert_almost_equal(np.cov(x, rowvar=False, bias=True),
@@ -1363,7 +1355,7 @@ def test_2d_without_missing(self):

     def test_1d_with_missing(self):
         # Test cov 1 1D variable w/missing values
-        x = self.data
+        x = self._create_data()
         x[-1] = masked
         x -= x.mean()
         nx = x.compressed()
@@ -1387,7 +1379,7 @@ def test_1d_with_missing(self):

     def test_2d_with_missing(self):
         # Test cov on 2D variable w/ missing value
-        x = self.data
+        x = self._create_data()
         x[-1] = masked
         x = x.reshape(3, 4)
         valid = np.logical_not(getmaskarray(x)).astype(int)
@@ -1409,74 +1401,33 @@ def test_2d_with_missing(self):


 class TestCorrcoef:
-    def setup_method(self):
-        self.data = array(np.random.rand(12))
-        self.data2 = array(np.random.rand(12))
-
-    def test_ddof(self):
-        # ddof raises DeprecationWarning
-        x, y = self.data, self.data2
-        expected = np.corrcoef(x)
-        expected2 = np.corrcoef(x, y)
-        with suppress_warnings() as sup:
-            warnings.simplefilter("always")
-            assert_warns(DeprecationWarning, corrcoef, x, ddof=-1)
-            sup.filter(DeprecationWarning, "bias and ddof have no effect")
-            # ddof has no or negligible effect on the function
-            assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0))
-            assert_almost_equal(corrcoef(x, ddof=-1), expected)
-            assert_almost_equal(corrcoef(x, y, ddof=-1), expected2)
-            assert_almost_equal(corrcoef(x, ddof=3), expected)
-            assert_almost_equal(corrcoef(x, y, ddof=3), expected2)
-
-    def test_bias(self):
-        x, y = self.data, self.data2
-        expected = np.corrcoef(x)
-        # bias raises DeprecationWarning
-        with suppress_warnings() as sup:
-            warnings.simplefilter("always")
-            assert_warns(DeprecationWarning, corrcoef, x, y, True, False)
-            assert_warns(DeprecationWarning, corrcoef, x, y, True, True)
-            assert_warns(DeprecationWarning, corrcoef, x, bias=False)
-            sup.filter(DeprecationWarning, "bias and ddof have no effect")
-            # bias has no or negligible effect on the function
-            assert_almost_equal(corrcoef(x, bias=1), expected)
+    def _create_data(self):
+        data = array(np.random.rand(12))
+        data2 = array(np.random.rand(12))
+        return data, data2

     def test_1d_without_missing(self):
         # Test cov on 1D variable w/o missing values
-        x = self.data
+        x = self._create_data()[0]
         assert_almost_equal(np.corrcoef(x), corrcoef(x))
         assert_almost_equal(np.corrcoef(x, rowvar=False),
                             corrcoef(x, rowvar=False))
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "bias and ddof have no effect")
-            assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
-                                corrcoef(x, rowvar=False, bias=True))

     def test_2d_without_missing(self):
         # Test corrcoef on 1 2D variable w/o missing values
-        x = self.data.reshape(3, 4)
+        x = self._create_data()[0].reshape(3, 4)
         assert_almost_equal(np.corrcoef(x), corrcoef(x))
         assert_almost_equal(np.corrcoef(x, rowvar=False),
                             corrcoef(x, rowvar=False))
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "bias and ddof have no effect")
-            assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
-                                corrcoef(x, rowvar=False, bias=True))

     def test_1d_with_missing(self):
         # Test corrcoef 1 1D variable w/missing values
-        x = self.data
+        x = self._create_data()[0]
         x[-1] = masked
         x -= x.mean()
         nx = x.compressed()
-
         assert_almost_equal(np.corrcoef(nx), corrcoef(x))
         assert_almost_equal(np.corrcoef(nx, rowvar=False),
                             corrcoef(x, rowvar=False))
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "bias and ddof have no effect")
-            assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True),
-                                corrcoef(x, rowvar=False, bias=True))
         try:
             corrcoef(x, allow_masked=False)
         except ValueError:
@@ -1486,36 +1437,20 @@ def test_1d_with_missing(self):
         assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1]))
         assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False),
                             corrcoef(x, x[::-1], rowvar=False))
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "bias and ddof have no effect")
-            # ddof and bias have no or negligible effect on the function
-            assert_almost_equal(np.corrcoef(nx, nx[::-1]),
-                                corrcoef(x, x[::-1], bias=1))
-            assert_almost_equal(np.corrcoef(nx, nx[::-1]),
-                                corrcoef(x, x[::-1], ddof=2))

     def test_2d_with_missing(self):
         # Test corrcoef on 2D variable w/ missing value
-        x = self.data
+        x = self._create_data()[0]
         x[-1] = masked
         x = x.reshape(3, 4)
         test = corrcoef(x)
         control = np.corrcoef(x)
         assert_almost_equal(test[:-1, :-1], control[:-1, :-1])
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "bias and ddof have no effect")
-            # ddof and bias have no or negligible effect on the function
-            assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1],
-                                control[:-1, :-1])
-            assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1],
-                                control[:-1, :-1])
-            assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1],
-                                control[:-1, :-1])


 class TestPolynomial:
-    #
+
     def test_polyfit(self):
         # Tests polyfit
         # On ndarrays
@@ -1877,6 +1812,18 @@ def test_shape_scalar(self):
         assert_equal(b.shape, (1, 1))
         assert_equal(b.mask.shape, b.data.shape)

+    @pytest.mark.parametrize("fn", [atleast_1d, vstack, diagflat])
+    def test_inspect_signature(self, fn):
+        name = fn.__name__
+        assert getattr(np.ma, name) is fn
+
+        assert fn.__module__ == "numpy.ma.extras"
+
+        wrapped = getattr(np, fn.__name__)
+        sig_wrapped = inspect.signature(wrapped)
+        sig = inspect.signature(fn)
+        assert sig == sig_wrapped
+

 class TestNDEnumerate:
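Alongside the warning changes, test_extras.py converts `setup_method` state into a `_create_data` factory, a pattern repeated in the test files below: each test builds fresh data instead of mutating an array stored on `self`. The diff does not spell out the motivation, but the effect is that in-place edits cannot leak between tests (or between threads when tests run in parallel). A small sketch of the pattern, with illustrative names:

import numpy as np

class TestCovLike:
    # Before: shared per-instance state, mutated in place by tests.
    #     def setup_method(self):
    #         self.data = np.ma.array(np.random.rand(12))

    # After, as in the diff: every test asks for its own copy of the data.
    def _create_data(self):
        return np.ma.array(np.random.rand(12))

    def test_with_missing(self):
        x = self._create_data()
        x[-1] = np.ma.masked  # the mutation stays local to this test
        assert x.count() == 11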
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 0da915101511..b4070df0f9a3 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -8,9 +8,11 @@
 import numpy as np
 import numpy.ma as ma
-from numpy._core.records import fromarrays as recfromarrays
-from numpy._core.records import fromrecords as recfromrecords
-from numpy._core.records import recarray
+from numpy._core.records import (
+    fromarrays as recfromarrays,
+    fromrecords as recfromrecords,
+    recarray,
+)
 from numpy.ma import masked, nomask
 from numpy.ma.mrecords import (
     MaskedRecords,
@@ -20,11 +22,7 @@
     fromtextfile,
     mrecarray,
 )
-from numpy.ma.testutils import (
-    assert_,
-    assert_equal,
-    assert_equal_records,
-)
+from numpy.ma.testutils import assert_, assert_equal, assert_equal_records
 from numpy.testing import temppath
@@ -352,24 +350,24 @@ def test_exotic_formats(self):


 class TestView:
-    def setup_method(self):
-        (a, b) = (np.arange(10), np.random.rand(10))
+    def _create_data(self):
+        a, b = (np.arange(10), np.random.rand(10))
         ndtype = [('a', float), ('b', float)]
         arr = np.array(list(zip(a, b)), dtype=ndtype)

         mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.))
         mrec.mask[3] = (False, True)
-        self.data = (mrec, a, b, arr)
+        return mrec, a, b, arr

     def test_view_by_itself(self):
-        (mrec, a, b, arr) = self.data
+        mrec = self._create_data()[0]
         test = mrec.view()
         assert_(isinstance(test, MaskedRecords))
         assert_equal_records(test, mrec)
         assert_equal_records(test._mask, mrec._mask)

     def test_view_simple_dtype(self):
-        (mrec, a, b, arr) = self.data
+        mrec, a, b, _ = self._create_data()
         ntype = (float, 2)
         test = mrec.view(ntype)
         assert_(isinstance(test, ma.MaskedArray))
@@ -377,7 +375,7 @@ def test_view_simple_dtype(self):
         assert_(test[3, 1] is ma.masked)

     def test_view_flexible_type(self):
-        (mrec, a, b, arr) = self.data
+        mrec, _, _, arr = self._create_data()
         alttype = [('A', float), ('B', float)]
         test = mrec.view(alttype)
         assert_(isinstance(test, MaskedRecords))
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index 30c3311798fc..fcf02fa2dccb 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -83,11 +83,7 @@
     where,
     zeros,
 )
-from numpy.testing import (
-    assert_,
-    assert_equal,
-    assert_raises,
-)
+from numpy.testing import assert_, assert_equal, assert_raises

 pi = np.pi
@@ -101,7 +97,7 @@ def eq(v, w, msg=''):


 class TestMa:
-    def setup_method(self):
+    def _create_data(self):
         x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
         y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
         a10 = 10.
@@ -114,11 +110,11 @@ def setup_method(self):
         xf = np.where(m1, 1e+20, x)
         s = x.shape
         xm.set_fill_value(1e+20)
-        self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
+        return x, y, a10, m1, m2, xm, ym, z, zm, xf, s

     def test_testBasic1d(self):
         # Test of basic array creation and properties in 1 dimension.
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+        x, _, _, m1, _, xm, _, _, _, xf, s = self._create_data()
         assert_(not isMaskedArray(x))
         assert_(isMaskedArray(xm))
         assert_equal(shape(xm), s)
@@ -133,7 +129,7 @@ def test_testBasic1d(self):
     @pytest.mark.parametrize("s", [(4, 3), (6, 2)])
     def test_testBasic2d(self, s):
         # Test of basic array creation and properties in 2 dimensions.
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+        x, y, _, m1, _, xm, ym, _, _, xf, s = self._create_data()
         x.shape = s
         y.shape = s
         xm.shape = s
@@ -152,7 +148,7 @@ def test_testBasic2d(self, s):

     def test_testArithmetic(self):
         # Test of basic arithmetic.
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+        x, y, a10, _, _, xm, ym, _, _, xf, s = self._create_data()
         a2d = array([[1, 2], [0, 4]])
         a2dm = masked_array(a2d, [[0, 0], [1, 0]])
         assert_(eq(a2d * a2d, a2d * a2dm))
@@ -196,7 +192,7 @@ def test_testMixedArithmetic(self):

     def test_testUfuncs1(self):
         # Test various functions such as sin, cos.
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+        x, y, _, _, _, xm, ym, z, zm, _, _ = self._create_data()
         assert_(eq(np.cos(x), cos(xm)))
         assert_(eq(np.cosh(x), cosh(xm)))
         assert_(eq(np.sin(x), sin(xm)))
@@ -242,7 +238,7 @@ def test_xtestCount(self):

     def test_testMinMax(self):
         # Test minimum and maximum.
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+        x, _, _, _, _, xm, _, _, _, _, _ = self._create_data()
         xr = np.ravel(x)  # max doesn't work if shaped
         xmr = ravel(xm)
@@ -252,7 +248,7 @@ def test_testMinMax(self):

     def test_testAddSumProd(self):
         # Test add, sum, product.
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+        x, y, _, _, _, xm, ym, _, _, _, s = self._create_data()
         assert_(eq(np.add.reduce(x), add.reduce(x)))
         assert_(eq(np.add.accumulate(x), add.accumulate(x)))
         assert_(eq(4, sum(array(4), axis=0)))
@@ -421,7 +417,7 @@ def test_testPut2(self):
         assert_(eq(x, [0, 1, 10, 40, 4]))

     def test_testMaPut(self):
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+        _, _, _, _, _, _, ym, _, zm, _, _ = self._create_data()
         m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
         i = np.nonzero(m)[0]
         put(ym, i, zm)
@@ -781,8 +777,9 @@ def test_assignment_by_condition_2(self):


 class TestUfuncs:
-    def setup_method(self):
-        self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
+
+    def _create_data(self):
+        return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
                   array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)

     def test_testUfuncRegression(self):
@@ -811,7 +808,7 @@ def test_testUfuncRegression(self):
             except AttributeError:
                 uf = getattr(fromnumeric, f)
             mf = getattr(np.ma, f)
-            args = self.d[:uf.nin]
+            args = self._create_data()[:uf.nin]
             with np.errstate():
                 if f in f_invalid_ignore:
                     np.seterr(invalid='ignore')
@@ -823,7 +820,7 @@ def test_testUfuncRegression(self):
             assert_(eqmask(ur.mask, mr.mask))

     def test_reduce(self):
-        a = self.d[0]
+        a = self._create_data()[0]
         assert_(not alltrue(a, axis=0))
         assert_(sometrue(a, axis=0))
         assert_equal(sum(a[:3], axis=0), 0)
@@ -847,7 +844,7 @@ def test_nonzero(self):


 class TestArrayMethods:
-    def setup_method(self):
+    def _create_data(self):
         x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                       8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                       3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
@@ -867,10 +864,10 @@ def setup_method(self):
         mX = array(data=X, mask=m.reshape(X.shape))
         mXX = array(data=XX, mask=m.reshape(XX.shape))
-        self.d = (x, X, XX, m, mx, mX, mXX)
+        return x, X, XX, m, mx, mX, mXX

     def test_trace(self):
-        (x, X, XX, m, mx, mX, mXX,) = self.d
+        _, X, _, _, _, mX, _ = self._create_data()
         mXdiag = mX.diagonal()
         assert_equal(mX.trace(), mX.diagonal().compressed().sum())
         assert_(eq(mX.trace(),
@@ -878,15 +875,15 @@ def test_trace(self):
                    axis=0)))

     def test_clip(self):
-        (x, X, XX, m, mx, mX, mXX,) = self.d
+        x, _, _, _, mx, _, _ = self._create_data()
         clipped = mx.clip(2, 8)
         assert_(eq(clipped.mask, mx.mask))
         assert_(eq(clipped._data, x.clip(2, 8)))
         assert_(eq(clipped._data, mx._data.clip(2, 8)))

     def test_ptp(self):
-        (x, X, XX, m, mx, mX, mXX,) = self.d
-        (n, m) = X.shape
+        _, X, _, m, mx, mX, _ = self._create_data()
+        n, m = X.shape
         # print(type(mx), mx.compressed())
         # raise Exception()
         assert_equal(mx.ptp(), np.ptp(mx.compressed()))
@@ -900,28 +897,28 @@ def test_ptp(self):
         assert_(eq(mX.ptp(1), rows))

     def test_swapaxes(self):
-        (x, X, XX, m, mx, mX, mXX,) = self.d
+        _, _, _, _, _, mX, mXX = self._create_data()
         mXswapped = mX.swapaxes(0, 1)
         assert_(eq(mXswapped[-1], mX[:, -1]))

         mXXswapped = mXX.swapaxes(0, 2)
         assert_equal(mXXswapped.shape, (2, 2, 3, 3))

     def test_cumprod(self):
-        (x, X, XX, m, mx, mX, mXX,) = self.d
+        mX = self._create_data()[5]
         mXcp = mX.cumprod(0)
         assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))
         mXcp = mX.cumprod(1)
         assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))

     def test_cumsum(self):
-        (x, X, XX, m, mx, mX, mXX,) = self.d
+        mX = self._create_data()[5]
         mXcp = mX.cumsum(0)
         assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))
         mXcp = mX.cumsum(1)
         assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))

     def test_varstd(self):
-        (x, X, XX, m, mx, mX, mXX,) = self.d
+        _, X, XX, _, _, mX, mXX = self._create_data()
         assert_(eq(mX.var(axis=None), mX.compressed().var()))
         assert_(eq(mX.std(axis=None), mX.compressed().std()))
         assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index 025387ba454c..4e40a3f8ee75 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -1,10 +1,5 @@
 import numpy as np
-from numpy.testing import (
-    assert_,
-    assert_allclose,
-    assert_array_equal,
-    suppress_warnings,
-)
+from numpy.testing import assert_, assert_array_equal


 class TestRegression:
@@ -62,18 +57,6 @@ def test_var_sets_maskedarray_scalar(self):
         a.var(out=mout)
         assert_(mout._data == 0)

-    def test_ddof_corrcoef(self):
-        # See gh-3336
-        x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
-        y = np.array([2, 2.5, 3.1, 3, 5])
-        # this test can be removed after deprecation.
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "bias and ddof have no effect")
-            r0 = np.ma.corrcoef(x, y, ddof=0)
-            r1 = np.ma.corrcoef(x, y, ddof=1)
-            # ddof should not have an effect (it gets cancelled out)
-            assert_allclose(r0.data, r1.data)
-
     def test_mask_not_backmangled(self):
         # See gh-10314.  Test case taken from gh-3140.
         a = np.ma.MaskedArray([1., 2.], mask=[False, False])
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py
index 3364e563097e..22bece987cb7 100644
--- a/numpy/ma/tests/test_subclassing.py
+++ b/numpy/ma/tests/test_subclassing.py
@@ -188,10 +188,10 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
 class TestSubclassing:
     # Test suite for masked subclasses of ndarray.

-    def setup_method(self):
+    def _create_data(self):
         x = np.arange(5, dtype='float')
         mx = msubarray(x, mask=[0, 1, 0, 0, 0])
-        self.data = (x, mx)
+        return x, mx

     def test_data_subclassing(self):
         # Tests whether the subclass is kept.
@@ -205,19 +205,19 @@ def test_data_subclassing(self):

     def test_maskedarray_subclassing(self):
         # Tests subclassing MaskedArray
-        (x, mx) = self.data
+        mx = self._create_data()[1]
         assert_(isinstance(mx._data, subarray))

     def test_masked_unary_operations(self):
         # Tests masked_unary_operation
-        (x, mx) = self.data
+        x, mx = self._create_data()
         with np.errstate(divide='ignore'):
             assert_(isinstance(log(mx), msubarray))
             assert_equal(log(x), np.log(x))

     def test_masked_binary_operations(self):
         # Tests masked_binary_operation
-        (x, mx) = self.data
+        x, mx = self._create_data()
         # Result should be a msubarray
         assert_(isinstance(add(mx, mx), msubarray))
         assert_(isinstance(add(mx, x), msubarray))
@@ -230,7 +230,7 @@ def test_masked_binary_operations(self):

     def test_masked_binary_operations2(self):
         # Tests domained_masked_binary_operation
-        (x, mx) = self.data
+        x, mx = self._create_data()
         xmx = masked_array(mx.data.__array__(), mask=mx.mask)
         assert_(isinstance(divide(mx, mx), msubarray))
         assert_(isinstance(divide(mx, x), msubarray))
@@ -427,20 +427,20 @@ def test_array_no_inheritance():


 class TestClassWrapping:
     # Test suite for classes that wrap MaskedArrays

-    def setup_method(self):
+    def _create_data(self):
         m = np.ma.masked_array([1, 3, 5], mask=[False, True, False])
         wm = WrappedArray(m)
-        self.data = (m, wm)
+        return m, wm

     def test_masked_unary_operations(self):
         # Tests masked_unary_operation
-        (m, wm) = self.data
+        wm = self._create_data()[1]
         with np.errstate(divide='ignore'):
             assert_(isinstance(np.log(wm), WrappedArray))

     def test_masked_binary_operations(self):
         # Tests masked_binary_operation
-        (m, wm) = self.data
+        m, wm = self._create_data()
         # Result should be a WrappedArray
         assert_(isinstance(np.add(wm, wm), WrappedArray))
         assert_(isinstance(np.add(m, wm), WrappedArray))
diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py
index bffcc34b759c..0df3b1757fd6 100644
--- a/numpy/ma/testutils.py
+++ b/numpy/ma/testutils.py
@@ -121,7 +121,7 @@ def assert_equal(actual, desired, err_msg=''):
         if not isinstance(actual, dict):
             raise AssertionError(repr(type(actual)))
         assert_equal(len(actual), len(desired), err_msg)
-        for k, i in desired.items():
+        for k in desired:
             if k not in actual:
                 raise AssertionError(f"{k} not in {actual}")
             assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
@@ -159,7 +159,7 @@ def fail_if_equal(actual, desired, err_msg='',):
         if not isinstance(actual, dict):
             raise AssertionError(repr(type(actual)))
         fail_if_equal(len(actual), len(desired), err_msg)
-        for k, i in desired.items():
+        for k in desired:
            if k not in actual:
                raise AssertionError(repr(k))
            fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
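The testutils.py fix above drops an unused value binding: iterating a dict yields its keys, so `for k, i in desired.items()` fetched values only to ignore them. In sketch form:

desired = {"a": 1, "b": 2}

# Before: unpacks (key, value) pairs and never uses the value.
for k, i in desired.items():
    print(k)

# After, as in the diff: iterate the keys directly.
for k in desired:
    print(k)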
diff --git a/numpy/ma/testutils.pyi b/numpy/ma/testutils.pyi
new file mode 100644
index 000000000000..92b843b93a43
--- /dev/null
+++ b/numpy/ma/testutils.pyi
@@ -0,0 +1,69 @@
+import numpy as np
+from numpy._typing import NDArray
+from numpy.testing import (
+    TestCase,
+    assert_,
+    assert_allclose,
+    assert_array_almost_equal_nulp,
+    assert_raises,
+)
+from numpy.testing._private.utils import _ComparisonFunc
+
+__all__ = [
+    "TestCase",
+    "almost",
+    "approx",
+    "assert_",
+    "assert_allclose",
+    "assert_almost_equal",
+    "assert_array_almost_equal",
+    "assert_array_almost_equal_nulp",
+    "assert_array_approx_equal",
+    "assert_array_compare",
+    "assert_array_equal",
+    "assert_array_less",
+    "assert_close",
+    "assert_equal",
+    "assert_equal_records",
+    "assert_mask_equal",
+    "assert_not_equal",
+    "assert_raises",
+    "fail_if_array_equal",
+]
+
+def approx(
+    a: object, b: object, fill_value: bool = True, rtol: float = 1e-5, atol: float = 1e-8
+) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ...
+def almost(a: object, b: object, decimal: int = 6, fill_value: bool = True) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ...
+
+#
+def assert_equal_records(a: NDArray[np.void], b: NDArray[np.void]) -> None: ...
+def assert_equal(actual: object, desired: object, err_msg: str = "") -> None: ...
+def fail_if_equal(actual: object, desired: object, err_msg: str = "") -> None: ...
+def assert_almost_equal(
+    actual: object, desired: object, decimal: int = 7, err_msg: str = "", verbose: bool = True
+) -> None: ...
+
+#
+def assert_array_compare(
+    comparison: _ComparisonFunc,
+    x: object,
+    y: object,
+    err_msg: str = "",
+    verbose: bool = True,
+    header: str = "",
+    fill_value: bool = True,
+) -> None: ...
+def assert_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ...
+def fail_if_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ...
+def assert_array_approx_equal(
+    x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True
+) -> None: ...
+def assert_array_almost_equal(
+    x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True
+) -> None: ...
+def assert_array_less(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ...
+def assert_mask_equal(m1: object, m2: object, err_msg: str = "") -> None: ...
+
+assert_not_equal = fail_if_equal
+assert_close = assert_almost_equal
diff --git a/numpy/matlib.py b/numpy/matlib.py
index f27d503cdbca..151cb6b369b4 100644
--- a/numpy/matlib.py
+++ b/numpy/matlib.py
@@ -56,7 +56,7 @@ def empty(shape, dtype=None, order='C'):
     >>> np.matlib.empty((2, 2))    # filled with random data
     matrix([[  6.76425276e-320,   9.79033856e-307], # random
             [  7.39337286e-309,   3.22135945e-309]])
-    >>> np.matlib.empty((2, 2), dtype=int)
+    >>> np.matlib.empty((2, 2), dtype=np.int_)
     matrix([[ 6600475,        0], # random
             [ 6586976, 22740995]])

@@ -177,7 +177,7 @@ def identity(n, dtype=None):
     Examples
     --------
     >>> import numpy.matlib
-    >>> np.matlib.identity(3, dtype=int)
+    >>> np.matlib.identity(3, dtype=np.int_)
     matrix([[1, 0, 0],
             [0, 1, 0],
             [0, 0, 1]])
@@ -222,7 +222,7 @@ def eye(n, M=None, k=0, dtype=float, order='C'):
     Examples
     --------
     >>> import numpy.matlib
-    >>> np.matlib.eye(3, k=1, dtype=float)
+    >>> np.matlib.eye(3, k=1, dtype=np.float64)
     matrix([[0., 1., 0.],
             [0., 0., 1.],
             [0., 0., 0.]])
diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi
index baeadc078028..f446dbf1c4b9 100644
--- a/numpy/matlib.pyi
+++ b/numpy/matlib.pyi
@@ -1,8 +1,8 @@
-from typing import Any, Literal, TypeAlias, TypeVar, overload
+from typing import Any, Literal, overload

 import numpy as np
 import numpy.typing as npt
-from numpy import (  # noqa: F401
+from numpy import (  # type: ignore[deprecated]  # noqa: F401
     False_,
     ScalarType,
     True_,
@@ -222,7 +222,6 @@ from numpy import (  # noqa: F401
     i0,
     iinfo,
     imag,
-    in1d,
     index_exp,
     indices,
     inexact,
@@ -451,7 +450,6 @@ from numpy import (  # noqa: F401
     trace,
     transpose,
     trapezoid,
-    trapz,
     tri,
     tril,
     tril_indices,
@@ -507,9 +505,8 @@ __all__ += np.__all__

 ###

-_T = TypeVar("_T", bound=np.generic)
-_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]]
-_Order: TypeAlias = Literal["C", "F"]
+type _Matrix[ScalarT: np.generic] = np.matrix[tuple[int, int], np.dtype[ScalarT]]
+type _Order = Literal["C", "F"]

 ###

@@ -517,7 +514,7 @@ _Order: TypeAlias = Literal["C", "F"]
 @overload
 def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ...
 @overload
-def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ...
+def empty[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ...
 @overload
 def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ...
@@ -525,7 +522,7 @@ def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C
 @overload
 def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ...
 @overload
-def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ...
+def ones[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ...
 @overload
 def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ...
@@ -533,7 +530,7 @@ def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C"
 @overload
 def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ...
 @overload
-def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ...
+def zeros[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ...
 @overload
 def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ...
@@ -541,7 +538,7 @@ def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C
 @overload
 def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ...
 @overload
-def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ...
+def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT]) -> _Matrix[ScalarT]: ...
 @overload
 def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ...
@@ -555,11 +552,11 @@ def eye(
     order: _Order = "C",
 ) -> _Matrix[np.float64]: ...
 @overload
-def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ...
+def eye[ScalarT: np.generic](n: int, M: int | None, k: int, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ...
 @overload
-def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ...
+def eye[ScalarT: np.generic](n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ...
 @overload
-def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ...
+def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ...

 #
 @overload
@@ -575,8 +572,8 @@ def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ...

 #
 @overload
-def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ...
+def repmat[ScalarT: np.generic](a: _Matrix[ScalarT], m: int, n: int) -> _Matrix[ScalarT]: ...
 @overload
-def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ...
+def repmat[ScalarT: np.generic](a: _ArrayLike[ScalarT], m: int, n: int) -> npt.NDArray[ScalarT]: ...
 @overload
 def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ...
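matlib.pyi is rewritten from `TypeVar`/`TypeAlias` declarations to the PEP 695 syntax introduced in Python 3.12: type parameters are declared inline on the alias or function instead of as module-level variables. A sketch of the equivalence, using standalone names rather than the stub's (requires Python 3.12+):

import numpy as np

# Pre-3.12 spelling:
#     _T = TypeVar("_T", bound=np.generic)
#     _Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]]
# PEP 695 spelling, as adopted in the stub above:
type Matrix[ScalarT: np.generic] = np.matrix[tuple[int, int], np.dtype[ScalarT]]

# Generic functions take their type parameters in brackets after the name.
def upper_left[ScalarT: np.generic](m: Matrix[ScalarT]) -> ScalarT:
    # Returns the element in row 0, column 0 of the matrix.
    return m[0, 0]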
diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi
index 56ae8bf4c84b..ad4091d98d06 100644
--- a/numpy/matrixlib/__init__.pyi
+++ b/numpy/matrixlib/__init__.pyi
@@ -1,5 +1,3 @@
-from numpy import matrix
-
-from .defmatrix import asmatrix, bmat
+from .defmatrix import asmatrix, bmat, matrix

 __all__ = ["matrix", "bmat", "asmatrix"]
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index 39b9a935500e..d706e09ed947 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -315,11 +315,11 @@ def sum(self, axis=None, dtype=None, out=None):
         >>> x.sum(axis=1)
         matrix([[3],
                 [7]])
-        >>> x.sum(axis=1, dtype='float')
+        >>> x.sum(axis=1, dtype=np.float64)
         matrix([[3.],
                 [7.]])
-        >>> out = np.zeros((2, 1), dtype='float')
-        >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out))
+        >>> out = np.zeros((2, 1), dtype=np.float64)
+        >>> x.sum(axis=1, dtype=np.float64, out=np.asmatrix(out))
         matrix([[3.],
                 [7.]])
diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi
index ee8f83746998..55b9d795078c 100644
--- a/numpy/matrixlib/defmatrix.pyi
+++ b/numpy/matrixlib/defmatrix.pyi
@@ -1,17 +1,216 @@
+from _typeshed import Incomplete
 from collections.abc import Mapping, Sequence
-from typing import Any
+from types import EllipsisType
+from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, overload
+from typing_extensions import TypeVar

-from numpy import matrix
-from numpy._typing import ArrayLike, DTypeLike, NDArray
+import numpy as np
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NDArray,
+    _AnyShape,
+    _ArrayLikeInt_co,
+    _NestedSequence,
+    _ShapeLike,
+)

 __all__ = ["asmatrix", "bmat", "matrix"]

+_ShapeT_co = TypeVar("_ShapeT_co", bound=_2D, default=_2D, covariant=True)
+_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)
+
+type _2D = tuple[int, int]
+type _Matrix[ScalarT: np.generic] = matrix[_2D, np.dtype[ScalarT]]
+type _ToIndex1 = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None
+type _ToIndex2 = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1]
+
+###
+
+class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]):
+    __array_priority__: ClassVar[float] = 10.0  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    def __new__(
+        subtype,  # pyright: ignore[reportSelfClsParameterName]
+        data: ArrayLike,
+        dtype: DTypeLike | None = None,
+        copy: bool = True,
+    ) -> _Matrix[Incomplete]: ...
+
+    #
+    @overload  # type: ignore[override]
+    def __getitem__(
+        self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], /
+    ) -> Incomplete: ...
+    @overload
+    def __getitem__(self, key: _ToIndex1 | _ToIndex2, /) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def __getitem__(self: _Matrix[np.void], key: str, /) -> _Matrix[Incomplete]: ...
+    @overload
+    def __getitem__(self: _Matrix[np.void], key: list[str], /) -> matrix[_2D, _DTypeT_co]: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    #
+    def __mul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __rmul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    #
+    def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ...  # type: ignore[override]
+
+    # keep in sync with `prod` and `mean`
+    @overload  # type: ignore[override]
+    def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
+    @overload
+    def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ...
+    @overload
+    def sum[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def sum[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `sum` and `mean`
+    @overload  # type: ignore[override]
+    def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
+    @overload
+    def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ...
+    @overload
+    def prod[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def prod[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `sum` and `prod`
+    @overload  # type: ignore[override]
+    def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
+    @overload
+    def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ...
+    @overload
+    def mean[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def mean[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `var`
+    @overload  # type: ignore[override]
+    def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ...
+    @overload
+    def std(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ...
+    @overload
+    def std[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ...
+    @overload
+    def std[OutT: np.ndarray](  # pyright: ignore[reportIncompatibleMethodOverride]
+        self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0
+    ) -> OutT: ...
+
+    # keep in sync with `std`
+    @overload  # type: ignore[override]
+    def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ...
+    @overload
+    def var(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ...
+    @overload
+    def var[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ...
+    @overload
+    def var[OutT: np.ndarray](  # pyright: ignore[reportIncompatibleMethodOverride]
+        self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0
+    ) -> OutT: ...
+
+    # keep in sync with `all`
+    @overload  # type: ignore[override]
+    def any(self, axis: None = None, out: None = None) -> np.bool: ...
+    @overload
+    def any(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ...
+    @overload
+    def any[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def any[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `any`
+    @overload  # type: ignore[override]
+    def all(self, axis: None = None, out: None = None) -> np.bool: ...
+    @overload
+    def all(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ...
+    @overload
+    def all[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def all[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `min` and `ptp`
+    @overload  # type: ignore[override]
+    def max[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ...
+    @overload
+    def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def max[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def max[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `max` and `ptp`
+    @overload  # type: ignore[override]
+    def min[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ...
+    @overload
+    def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def min[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def min[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `max` and `min`
+    @overload
+    def ptp[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ...
+    @overload
+    def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `argmin`
+    @overload  # type: ignore[override]
+    def argmax[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ...
+    @overload
+    def argmax(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ...
+    @overload
+    def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `argmax`
+    @overload  # type: ignore[override]
+    def argmin[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ...
+    @overload
+    def argmin(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ...
+    @overload
+    def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ...
+    @overload
+    def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # the second overload handles the (rare) case that the matrix is not 2-d
+    @overload
+    def tolist[T](self: _Matrix[np.generic[T]]) -> list[list[T]]: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+    @overload
+    def tolist(self) -> Incomplete: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # these three methods will at least return a `2-d` array of shape (1, n)
+    def squeeze(self, /, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ...
+    def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ...  # type: ignore[override]
+    def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ...  # type: ignore[override]
+
+    # matrix.T is inherited from _ScalarOrArrayCommon
+    def getT(self) -> Self: ...
+    @property
+    def I(self) -> _Matrix[Incomplete]: ...  # noqa: E743
+    def getI(self) -> _Matrix[Incomplete]: ...
+    @property
+    def A(self) -> np.ndarray[_2D, _DTypeT_co]: ...
+    def getA(self) -> np.ndarray[_2D, _DTypeT_co]: ...
+    @property
+    def A1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ...
+    def getA1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ...
+    @property
+    def H(self) -> matrix[_2D, _DTypeT_co]: ...
+    def getH(self) -> matrix[_2D, _DTypeT_co]: ...
+
 def bmat(
     obj: str | Sequence[ArrayLike] | NDArray[Any],
-    ldict: Mapping[str, Any] | None = ...,
-    gdict: Mapping[str, Any] | None = ...,
-) -> matrix[tuple[int, int], Any]: ...
+    ldict: Mapping[str, Any] | None = None,
+    gdict: Mapping[str, Any] | None = None,
+) -> _Matrix[Incomplete]: ...

-def asmatrix(
-    data: ArrayLike, dtype: DTypeLike = ...
-) -> matrix[tuple[int, int], Any]: ...
+def asmatrix(data: ArrayLike, dtype: DTypeLike | None = None) -> _Matrix[Incomplete]: ...
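The rewritten defmatrix.pyi repeats the same four-step overload ladder for every reduction (`sum`, `prod`, `mean`, `std`, `var`, and the rest): no `axis` collapses to a scalar-like result, an explicit `axis` keeps a matrix, and an `out` array, whether passed positionally or by keyword, is returned as-is with its own type preserved. A simplified stand-in showing the shape of the ladder, not the stub itself (requires Python 3.12+ for the inline type parameter):

from typing import overload

import numpy as np

@overload
def total(a: np.ndarray, axis: None = None, out: None = None) -> np.float64: ...
@overload
def total(a: np.ndarray, axis: int, out: None = None) -> np.ndarray: ...
@overload
def total[OutT: np.ndarray](a: np.ndarray, axis: int | None, out: OutT) -> OutT: ...
@overload
def total[OutT: np.ndarray](a: np.ndarray, axis: int | None = None, *, out: OutT) -> OutT: ...
def total(a, axis=None, out=None):
    # Runtime implementation behind the overloads above.
    return np.sum(a, axis=axis, out=out)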
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index ce23933ab7f7..a0e868f5fe2c 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -288,10 +288,10 @@ def test_instance_methods(self):
             'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield',
             'getA', 'getA1', 'item', 'nonzero', 'put',
             'putmask', 'resize', 'searchsorted', 'setflags', 'setfield', 'sort',
-            'partition', 'argpartition', 'newbyteorder', 'to_device',
+            'partition', 'argpartition', 'to_device',
             'take', 'tofile', 'tolist', 'tobytes', 'all', 'any',
             'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp',
-            'prod', 'std', 'ctypes', 'itemset', 'bitwise_count',
+            'prod', 'std', 'ctypes', 'bitwise_count',
             ]
         for attrib in dir(a):
             if attrib.startswith('_') or attrib in excluded_methods:
diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py
index e6df047ee6ca..ee3dc96b9ac5 100644
--- a/numpy/matrixlib/tests/test_masked_matrix.py
+++ b/numpy/matrixlib/tests/test_masked_matrix.py
@@ -116,7 +116,7 @@ def test_flat(self):
         # Test setting
         test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
         testflat = test.flat
-        testflat[:] = testflat[[2, 1, 0]]
+        testflat[:] = testflat[np.array([2, 1, 0])]
         assert_equal(test, control)
         testflat[0] = 9
         # test that matrices keep the correct shape (#4615)
@@ -182,26 +182,26 @@ def test_view(self):

 class TestSubclassing:
     # Test suite for masked subclasses of ndarray.

-    def setup_method(self):
+    def _create_data(self):
         x = np.arange(5, dtype='float')
         mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
-        self.data = (x, mx)
+        return x, mx

     def test_maskedarray_subclassing(self):
         # Tests subclassing MaskedArray
-        (x, mx) = self.data
+        mx = self._create_data()[1]
         assert_(isinstance(mx._data, np.matrix))

     def test_masked_unary_operations(self):
         # Tests masked_unary_operation
-        (x, mx) = self.data
+        x, mx = self._create_data()
         with np.errstate(divide='ignore'):
             assert_(isinstance(log(mx), MMatrix))
             assert_equal(log(x), np.log(x))

     def test_masked_binary_operations(self):
         # Tests masked_binary_operation
-        (x, mx) = self.data
+        x, mx = self._create_data()
         # Result should be a MMatrix
         assert_(isinstance(add(mx, mx), MMatrix))
         assert_(isinstance(add(mx, x), MMatrix))
@@ -215,7 +215,7 @@ def test_masked_binary_operations(self):

     def test_masked_binary_operations2(self):
         # Tests domained_masked_binary_operation
-        (x, mx) = self.data
+        x, mx = self._create_data()
         xmx = masked_array(mx.data.__array__(), mask=mx.mask)
         assert_(isinstance(divide(mx, mx), MMatrix))
         assert_(isinstance(divide(mx, x), MMatrix))
diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py
index 4e639653bda4..99acf32adc49 100644
--- a/numpy/matrixlib/tests/test_matrix_linalg.py
+++ b/numpy/matrixlib/tests/test_matrix_linalg.py
@@ -1,4 +1,6 @@
 """ Test functions for linalg module using the matrix class."""
+import pytest
+
 import numpy as np

 from numpy.linalg.tests.test_linalg import (
     CondCases,
@@ -12,13 +14,13 @@
     PinvCases,
     SolveCases,
     SVDCases,
+    TestQR as _TestQR,
     _TestNorm2D,
     _TestNormDoubleBase,
     _TestNormInt64Base,
     _TestNormSingleBase,
     apply_tag,
 )
-from numpy.linalg.tests.test_linalg import TestQR as _TestQR

 CASES = []
@@ -81,6 +83,9 @@ class TestDetMatrix(DetCases, MatrixTestCase):
     pass


+@pytest.mark.thread_unsafe(
+    reason="residuals not calculated properly for square tests (gh-29851)"
+)
 class TestLstsqMatrix(LstsqCases, MatrixTestCase):
     pass
diff --git a/numpy/meson.build b/numpy/meson.build
index 67e4861d7ad6..45d5a2b52eb8 100644
--- a/numpy/meson.build
+++ b/numpy/meson.build
@@ -32,7 +32,7 @@ endif
 # than a `.a` file extension in order not to break including them in a
 # distutils-based build (see gh-23981 and
 # https://mesonbuild.com/FAQ.html#why-does-building-my-project-with-msvc-output-static-libraries-called-libfooa)
-if is_windows and cc.get_id() == 'msvc'
+if is_windows and cc.get_id() in ['msvc', 'clang-cl']
   name_prefix_staticlib = ''
   name_suffix_staticlib = 'lib'
 else
diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi
index 6fb0fb5ec7fa..ad005a2dbe38 100644
--- a/numpy/polynomial/__init__.pyi
+++ b/numpy/polynomial/__init__.pyi
@@ -10,12 +10,18 @@ from .polynomial import Polynomial

 __all__ = [
     "set_default_printstyle",
-    "polynomial", "Polynomial",
-    "chebyshev", "Chebyshev",
-    "legendre", "Legendre",
-    "hermite", "Hermite",
-    "hermite_e", "HermiteE",
-    "laguerre", "Laguerre",
+    "polynomial",
+    "Polynomial",
+    "chebyshev",
+    "Chebyshev",
+    "legendre",
+    "Legendre",
+    "hermite",
+    "Hermite",
+    "hermite_e",
+    "HermiteE",
+    "laguerre",
+    "Laguerre",
 ]

 def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ...
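The two polynomial stub files that follow lean on the same PEP 695 `type` statement seen in matlib.pyi: aliases such as `_AnyOther` and the `_Series` family become `type` declarations instead of `TypeAlias` assignments. One practical difference worth noting is that a `type` alias is evaluated lazily, so it may refer to names defined later in the file. A small sketch of that property, with illustrative names (Python 3.12+):

# Evaluated lazily: Pair can mention Point before Point exists.
type Pair = tuple[Point, Point]

class Point:
    def __init__(self, x: float, y: float) -> None:
        self.x, self.y = x, y

p: Pair = (Point(0.0, 0.0), Point(1.0, 1.0))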
diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi
index 6d71a8cb8d2c..2fdfd24db7a9 100644
--- a/numpy/polynomial/_polybase.pyi
+++ b/numpy/polynomial/_polybase.pyi
@@ -1,19 +1,7 @@
 import abc
 import decimal
-import numbers
-from collections.abc import Iterator, Mapping, Sequence
-from typing import (
-    Any,
-    ClassVar,
-    Generic,
-    Literal,
-    LiteralString,
-    Self,
-    SupportsIndex,
-    TypeAlias,
-    overload,
-)
-
+from collections.abc import Iterator, Sequence
+from typing import Any, ClassVar, Generic, Literal, Self, SupportsIndex, overload
 from typing_extensions import TypeIs, TypeVar

 import numpy as np
@@ -40,92 +28,74 @@ from ._polytypes import (

 __all__ = ["ABCPolyBase"]

-_NameCo = TypeVar(
-    "_NameCo",
-    bound=LiteralString | None,
-    covariant=True,
-    default=LiteralString | None
-)
-_Other = TypeVar("_Other", bound=ABCPolyBase)
+_NameT_co = TypeVar("_NameT_co", bound=str | None, default=str | None, covariant=True)

-_AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co
-_Hundred: TypeAlias = Literal[100]
+type _AnyOther = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co

-class ABCPolyBase(Generic[_NameCo], abc.ABC):
-    __hash__: ClassVar[None]  # type: ignore[assignment]  # pyright: ignore[reportIncompatibleMethodOverride]
-    __array_ufunc__: ClassVar[None]
+###

-    maxpower: ClassVar[_Hundred]
-    _superscript_mapping: ClassVar[Mapping[int, str]]
-    _subscript_mapping: ClassVar[Mapping[int, str]]
-    _use_unicode: ClassVar[bool]
+class ABCPolyBase(Generic[_NameT_co], abc.ABC):  # noqa: UP046
+    __hash__: ClassVar[None] = None  # type: ignore[assignment]  # pyright: ignore[reportIncompatibleMethodOverride]
+    __array_ufunc__: ClassVar[None] = None
+    maxpower: ClassVar[Literal[100]] = 100

-    basis_name: _NameCo
-    coef: _CoefSeries
-    domain: _Array2[np.inexact | np.object_]
-    window: _Array2[np.inexact | np.object_]
+    _superscript_mapping: ClassVar[dict[int, str]] = ...
+    _subscript_mapping: ClassVar[dict[int, str]] = ...
+    _use_unicode: ClassVar[bool] = ...

-    _symbol: LiteralString
+    _symbol: str
+    @property
+    def symbol(self, /) -> str: ...
     @property
-    def symbol(self, /) -> LiteralString: ...
+    @abc.abstractmethod
+    def domain(self) -> _Array2[np.float64 | Any]: ...
+    @property
+    @abc.abstractmethod
+    def window(self) -> _Array2[np.float64 | Any]: ...
+    @property
+    @abc.abstractmethod
+    def basis_name(self) -> _NameT_co: ...
+
+    coef: _CoefSeries

     def __init__(
         self,
         /,
         coef: _SeriesLikeCoef_co,
-        domain: _SeriesLikeCoef_co | None = ...,
-        window: _SeriesLikeCoef_co | None = ...,
-        symbol: str = ...,
+        domain: _SeriesLikeCoef_co | None = None,
+        window: _SeriesLikeCoef_co | None = None,
+        symbol: str = "x",
     ) -> None: ...

+    #
     @overload
-    def __call__(self, /, arg: _Other) -> _Other: ...
-    # TODO: Once `_ShapeT@ndarray` is covariant and bounded (see #26081),
-    # additionally include 0-d arrays as input types with scalar return type.
+    def __call__[PolyT: ABCPolyBase](self, /, arg: PolyT) -> PolyT: ...
     @overload
-    def __call__(
-        self,
-        /,
-        arg: _FloatLike_co | decimal.Decimal | numbers.Real | np.object_,
-    ) -> np.float64 | np.complex128: ...
+    def __call__(self, /, arg: _FloatLike_co | decimal.Decimal) -> np.float64 | Any: ...
     @overload
-    def __call__(
-        self,
-        /,
-        arg: _NumberLike_co | numbers.Complex,
-    ) -> np.complex128: ...
+    def __call__(self, /, arg: _NumberLike_co) -> np.complex128 | Any: ...
     @overload
-    def __call__(self, /, arg: _ArrayLikeFloat_co) -> (
-        npt.NDArray[np.float64]
-        | npt.NDArray[np.complex128]
-        | npt.NDArray[np.object_]
-    ): ...
+    def __call__(self, /, arg: _ArrayLikeFloat_co) -> npt.NDArray[np.float64 | Any]: ...
     @overload
-    def __call__(
-        self,
-        /,
-        arg: _ArrayLikeComplex_co,
-    ) -> npt.NDArray[np.complex128] | npt.NDArray[np.object_]: ...
+    def __call__(self, /, arg: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128 | Any]: ...
     @overload
-    def __call__(
-        self,
-        /,
-        arg: _ArrayLikeCoefObject_co,
-    ) -> npt.NDArray[np.object_]: ...
+    def __call__(self, /, arg: _ArrayLikeCoefObject_co) -> npt.NDArray[np.object_]: ...

-    def __format__(self, fmt_str: str, /) -> str: ...
-    def __eq__(self, x: object, /) -> bool: ...
-    def __ne__(self, x: object, /) -> bool: ...
+    # unary ops
     def __neg__(self, /) -> Self: ...
     def __pos__(self, /) -> Self: ...
+
+    # binary ops
     def __add__(self, x: _AnyOther, /) -> Self: ...
     def __sub__(self, x: _AnyOther, /) -> Self: ...
     def __mul__(self, x: _AnyOther, /) -> Self: ...
+    def __pow__(self, x: _AnyOther, /) -> Self: ...
     def __truediv__(self, x: _AnyOther, /) -> Self: ...
     def __floordiv__(self, x: _AnyOther, /) -> Self: ...
     def __mod__(self, x: _AnyOther, /) -> Self: ...
     def __divmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ...
-    def __pow__(self, x: _AnyOther, /) -> Self: ...
+
+    # reflected binary ops
     def __radd__(self, x: _AnyOther, /) -> Self: ...
     def __rsub__(self, x: _AnyOther, /) -> Self: ...
     def __rmul__(self, x: _AnyOther, /) -> Self: ...
@@ -133,72 +103,74 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC):
     def __rfloordiv__(self, x: _AnyOther, /) -> Self: ...
     def __rmod__(self, x: _AnyOther, /) -> Self: ...
     def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ...
+
+    # iterable and sized
     def __len__(self, /) -> int: ...
-    def __iter__(self, /) -> Iterator[np.inexact | object]: ...
+    def __iter__(self, /) -> Iterator[np.float64 | Any]: ...
+
+    # pickling
     def __getstate__(self, /) -> dict[str, Any]: ...
     def __setstate__(self, dict: dict[str, Any], /) -> None: ...

+    #
     def has_samecoef(self, /, other: ABCPolyBase) -> bool: ...
     def has_samedomain(self, /, other: ABCPolyBase) -> bool: ...
     def has_samewindow(self, /, other: ABCPolyBase) -> bool: ...
-    @overload
-    def has_sametype(self, /, other: ABCPolyBase) -> TypeIs[Self]: ...
-    @overload
-    def has_sametype(self, /, other: object) -> Literal[False]: ...
+    def has_sametype(self, /, other: object) -> TypeIs[Self]: ...

+    #
     def copy(self, /) -> Self: ...
     def degree(self, /) -> int: ...
-    def cutdeg(self, /) -> Self: ...
-    def trim(self, /, tol: _FloatLike_co = ...) -> Self: ...
+    def cutdeg(self, /, deg: int) -> Self: ...
+    def trim(self, /, tol: _FloatLike_co = 0) -> Self: ...
     def truncate(self, /, size: _AnyInt) -> Self: ...

+    #
     @overload
-    def convert(
+    def convert[PolyT: ABCPolyBase](
         self,
         /,
         domain: _SeriesLikeCoef_co | None,
-        kind: type[_Other],
-        window: _SeriesLikeCoef_co | None = ...,
-    ) -> _Other: ...
+        kind: type[PolyT],
+        window: _SeriesLikeCoef_co | None = None,
+    ) -> PolyT: ...
     @overload
-    def convert(
+    def convert[PolyT: ABCPolyBase](
         self,
         /,
-        domain: _SeriesLikeCoef_co | None = ...,
+        domain: _SeriesLikeCoef_co | None = None,
         *,
-        kind: type[_Other],
-        window: _SeriesLikeCoef_co | None = ...,
-    ) -> _Other: ...
+        kind: type[PolyT],
+        window: _SeriesLikeCoef_co | None = None,
+    ) -> PolyT: ...
     @overload
     def convert(
         self,
         /,
-        domain: _SeriesLikeCoef_co | None = ...,
+        domain: _SeriesLikeCoef_co | None = None,
         kind: None = None,
-        window: _SeriesLikeCoef_co | None = ...,
+        window: _SeriesLikeCoef_co | None = None,
     ) -> Self: ...

+    #
     def mapparms(self, /) -> _Tuple2[Any]: ...
-
     def integ(
         self,
         /,
-        m: SupportsIndex = ...,
-        k: _CoefLike_co | _SeriesLikeCoef_co = ...,
-        lbnd: _CoefLike_co | None = ...,
+        m: SupportsIndex = 1,
+        k: _CoefLike_co | _SeriesLikeCoef_co = [],
+        lbnd: _CoefLike_co | None = None,
     ) -> Self: ...
-
-    def deriv(self, /, m: SupportsIndex = ...) -> Self: ...
-
+    def deriv(self, /, m: SupportsIndex = 1) -> Self: ...
     def roots(self, /) -> _CoefSeries: ...
-
     def linspace(
         self,
         /,
-        n: SupportsIndex = ...,
-        domain: _SeriesLikeCoef_co | None = ...,
+        n: SupportsIndex = 100,
+        domain: _SeriesLikeCoef_co | None = None,
     ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ...

+    #
     @overload
     @classmethod
     def fit(
@@ -206,12 +178,12 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC):
         x: _SeriesLikeCoef_co,
         y: _SeriesLikeCoef_co,
         deg: int | _SeriesLikeInt_co,
-        domain: _SeriesLikeCoef_co | None = ...,
-        rcond: _FloatLike_co = ...,
-        full: Literal[False] = ...,
-        w: _SeriesLikeCoef_co | None = ...,
-        window: _SeriesLikeCoef_co | None = ...,
-        symbol: str = ...,
+        domain: _SeriesLikeCoef_co | None = None,
+        rcond: _FloatLike_co | None = None,
+        full: Literal[False] = False,
+        w: _SeriesLikeCoef_co | None = None,
+        window: _SeriesLikeCoef_co | None = None,
+        symbol: str = "x",
     ) -> Self: ...
     @overload
     @classmethod
@@ -220,13 +192,13 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC):
         x: _SeriesLikeCoef_co,
         y: _SeriesLikeCoef_co,
         deg: int | _SeriesLikeInt_co,
-        domain: _SeriesLikeCoef_co | None = ...,
-        rcond: _FloatLike_co = ...,
+        domain: _SeriesLikeCoef_co | None = None,
+        rcond: _FloatLike_co | None = None,
         *,
         full: Literal[True],
-        w: _SeriesLikeCoef_co | None = ...,
-        window: _SeriesLikeCoef_co | None = ...,
-        symbol: str = ...,
+        w: _SeriesLikeCoef_co | None = None,
+        window: _SeriesLikeCoef_co | None = None,
+        symbol: str = "x",
     ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ...
     @overload
     @classmethod
@@ -237,49 +209,47 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC):
         deg: int | _SeriesLikeInt_co,
         domain: _SeriesLikeCoef_co | None,
         rcond: _FloatLike_co,
-        full: Literal[True], /,
-        w: _SeriesLikeCoef_co | None = ...,
-        window: _SeriesLikeCoef_co | None = ...,
-        symbol: str = ...,
+        full: Literal[True],
+        /,
+        w: _SeriesLikeCoef_co | None = None,
+        window: _SeriesLikeCoef_co | None = None,
+        symbol: str = "x",
     ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ...

+    #
     @classmethod
     def fromroots(
         cls,
         roots: _ArrayLikeCoef_co,
-        domain: _SeriesLikeCoef_co | None = ...,
-        window: _SeriesLikeCoef_co | None = ...,
-        symbol: str = ...,
+        domain: _SeriesLikeCoef_co | None = [],
+        window: _SeriesLikeCoef_co | None = None,
+        symbol: str = "x",
     ) -> Self: ...
-
     @classmethod
     def identity(
         cls,
-        domain: _SeriesLikeCoef_co | None = ...,
-        window: _SeriesLikeCoef_co | None = ...,
-        symbol: str = ...,
+        domain: _SeriesLikeCoef_co | None = None,
+        window: _SeriesLikeCoef_co | None = None,
+        symbol: str = "x",
     ) -> Self: ...
-
     @classmethod
     def basis(
         cls,
         deg: _AnyInt,
-        domain: _SeriesLikeCoef_co | None = ...,
-        window: _SeriesLikeCoef_co | None = ...,
-        symbol: str = ...,
+        domain: _SeriesLikeCoef_co | None = None,
+        window: _SeriesLikeCoef_co | None = None,
+        symbol: str = "x",
     ) -> Self: ...
-
     @classmethod
     def cast(
         cls,
         series: ABCPolyBase,
-        domain: _SeriesLikeCoef_co | None = ...,
-        window: _SeriesLikeCoef_co | None = ...,
+        domain: _SeriesLikeCoef_co | None = None,
+        window: _SeriesLikeCoef_co | None = None,
     ) -> Self: ...
-
     @classmethod
     def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ...
-    @staticmethod
-    def _str_term_ascii(i: str, arg_str: str) -> str: ...
-    @staticmethod
-    def _repr_latex_term(i: str, arg_str: str, needs_parens: bool) -> str: ...
+    @classmethod
+    def _str_term_ascii(cls, /, i: str, arg_str: str) -> str: ...
+    @classmethod
+    def _repr_latex_term(cls, /, i: str, arg_str: str, needs_parens: bool) -> str: ...
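In the _polybase.pyi rewrite above, `domain`, `window`, and `basis_name` move from plain class attributes to abstract properties, so type checkers (and `abc` at runtime) can insist that every concrete `ABCPolyBase` subclass provides them. A compact sketch of the mechanism, using a stand-in class hierarchy rather than the real one:

import abc

class PolyBase(abc.ABC):
    @property
    @abc.abstractmethod
    def basis_name(self) -> str | None: ...

class Cheb(PolyBase):
    @property
    def basis_name(self) -> str:
        # Concrete subclasses must implement the abstract property.
        return "T"

Cheb()        # fine: the abstract property is implemented
# PolyBase()  # would raise TypeError: can't instantiate abstract class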
+type _Series[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] -_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _FloatSeries = _Series[np.floating] +type _ComplexSeries = _Series[np.complexfloating] +type _ObjectSeries = _Series[np.object_] +type _CoefSeries = _Series[np.inexact | np.object_] -_FloatSeries: TypeAlias = _Series[np.floating] -_ComplexSeries: TypeAlias = _Series[np.complexfloating] -_ObjectSeries: TypeAlias = _Series[np.object_] -_CoefSeries: TypeAlias = _Series[np.inexact | np.object_] +type _FloatArray = npt.NDArray[np.floating] +type _ComplexArray = npt.NDArray[np.complexfloating] +type _ObjectArray = npt.NDArray[np.object_] +type _CoefArray = npt.NDArray[np.inexact | np.object_] -_FloatArray: TypeAlias = npt.NDArray[np.floating] -_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating] -_ObjectArray: TypeAlias = npt.NDArray[np.object_] -_CoefArray: TypeAlias = npt.NDArray[np.inexact | np.object_] +type _Tuple2[_T] = tuple[_T, _T] +type _Array1[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[1]], np.dtype[ScalarT]] +type _Array2[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[2]], np.dtype[ScalarT]] -_Tuple2: TypeAlias = tuple[_T, _T] -_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_ScalarT]] -_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_ScalarT]] +type _AnyInt = SupportsInt | SupportsIndex -_AnyInt: TypeAlias = SupportsInt | SupportsIndex - -_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] -_CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co +type _CoefObjectLike_co = np.object_ | _SupportsCoefOps[Any] +type _CoefLike_co = _NumberLike_co | _CoefObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. 
-_SeriesLikeBool_co: TypeAlias = ( - _SupportsArray[np.dtype[np.bool]] - | Sequence[bool | np.bool] -) -_SeriesLikeInt_co: TypeAlias = ( - _SupportsArray[np.dtype[np.integer | np.bool]] - | Sequence[_IntLike_co] -) -_SeriesLikeFloat_co: TypeAlias = ( - _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] - | Sequence[_FloatLike_co] -) -_SeriesLikeComplex_co: TypeAlias = ( - _SupportsArray[np.dtype[np.inexact | np.integer | np.bool]] - | Sequence[_ComplexLike_co] -) -_SeriesLikeObject_co: TypeAlias = ( - _SupportsArray[np.dtype[np.object_]] - | Sequence[_CoefObjectLike_co] -) -_SeriesLikeCoef_co: TypeAlias = ( - _SupportsArray[np.dtype[np.number | np.bool | np.object_]] - | Sequence[_CoefLike_co] -) +type _SeriesLikeBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] +type _SeriesLikeInt_co = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] +type _SeriesLikeFloat_co = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] +type _SeriesLikeComplex_co = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] +type _SeriesLikeObject_co = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] +type _SeriesLikeCoef_co = _SupportsArray[np.dtype[_PolyScalar]] | Sequence[_CoefLike_co] -_ArrayLikeCoefObject_co: TypeAlias = ( - _CoefObjectLike_co - | _SeriesLikeObject_co - | _NestedSequence[_SeriesLikeObject_co] -) -_ArrayLikeCoef_co: TypeAlias = ( - npt.NDArray[np.number | np.bool | np.object_] - | _ArrayLikeNumber_co - | _ArrayLikeCoefObject_co -) +type _ArrayLikeCoefObject_co = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] +type _ArrayLikeCoef_co = npt.NDArray[_PolyScalar] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co -_Name_co = TypeVar( - "_Name_co", - bound=LiteralString, - covariant=True, - default=LiteralString -) - -@type_check_only -class _Named(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... +type _Line[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Companion[ScalarT: _PolyScalar] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] -_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_ScalarT]] +type _AnyDegrees = Sequence[SupportsIndex] +type _FullFitResult = Sequence[np.inexact | np.int32] @type_check_only -class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): +class _FuncLine(Protocol): @overload - def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... + def __call__[ScalarT: _PolyScalar](self, /, off: ScalarT, scl: ScalarT) -> _Line[ScalarT]: ... @overload def __call__(self, /, off: int, scl: int) -> _Line[np.int_]: ... @overload def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... @overload - def __call__( - self, - /, - off: complex, - scl: complex, - ) -> _Line[np.complex128]: ... + def __call__(self, /, off: complex, scl: complex) -> _Line[np.complex128]: ... @overload - def __call__( - self, - /, - off: _SupportsCoefOps[Any], - scl: _SupportsCoefOps[Any], - ) -> _Line[np.object_]: ... + def __call__(self, /, off: _SupportsCoefOps[Any], scl: _SupportsCoefOps[Any]) -> _Line[np.object_]: ... @type_check_only -class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncFromRoots(Protocol): @overload def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -160,38 +103,18 @@ class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... 
@type_check_only -class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): +class _FuncBinOp(Protocol): @overload - def __call__( - self, - /, - c1: _SeriesLikeBool_co, - c2: _SeriesLikeBool_co, - ) -> NoReturn: ... + def __call__(self, /, c1: _SeriesLikeBool_co, c2: _SeriesLikeBool_co) -> NoReturn: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeFloat_co, - c2: _SeriesLikeFloat_co, - ) -> _FloatSeries: ... + def __call__(self, /, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeComplex_co, - c2: _SeriesLikeComplex_co, - ) -> _ComplexSeries: ... + def __call__(self, /, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, - ) -> _ObjectSeries: ... + def __call__(self, /, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): +class _FuncUnOp(Protocol): @overload def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -200,7 +123,7 @@ class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPoly2Ortho(Protocol): @overload def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -209,253 +132,112 @@ class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPow(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - pow: _IntLike_co, - maxpower: _IntLike_co | None = ..., - ) -> _FloatSeries: ... + def __call__(self, /, c: _SeriesLikeFloat_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _FloatSeries: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - pow: _IntLike_co, - maxpower: _IntLike_co | None = ..., - ) -> _ComplexSeries: ... + def __call__(self, /, c: _SeriesLikeComplex_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ComplexSeries: ... @overload - def __call__( - self, - /, - c: _SeriesLikeCoef_co, - pow: _IntLike_co, - maxpower: _IntLike_co | None = ..., - ) -> _ObjectSeries: ... + def __call__(self, /, c: _SeriesLikeCoef_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ObjectSeries: ... @type_check_only -class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): +class _FuncDer(Protocol): @overload def __call__( self, /, c: _ArrayLikeFloat_co, - m: SupportsIndex = ..., - scl: _FloatLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, ) -> _FloatArray: ... @overload def __call__( self, /, c: _ArrayLikeComplex_co, - m: SupportsIndex = ..., - scl: _ComplexLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, ) -> _ComplexArray: ... @overload def __call__( self, /, c: _ArrayLikeCoef_co, - m: SupportsIndex = ..., - scl: _CoefLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, ) -> _ObjectArray: ... 
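`_FuncDer` above now spells out the literal defaults (`m=1`, `scl=1`, `axis=0`) rather than `...`, matching the runtime signatures of the `*der` functions. A short usage sketch against the public `numpy.polynomial.polynomial` API (expected outputs in comments):

    import numpy as np
    from numpy.polynomial import polynomial as P

    c = np.array([1.0, 2.0, 3.0])   # coefficients of 1 + 2*x + 3*x**2
    print(P.polyder(c))             # default m=1, scl=1, axis=0 -> [2. 6.]
    print(P.polyder(c, m=2))        # second derivative -> [6.]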
@type_check_only -class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): +class _FuncInteg(Protocol): @overload def __call__( self, /, c: _ArrayLikeFloat_co, - m: SupportsIndex = ..., - k: _FloatLike_co | _SeriesLikeFloat_co = ..., - lbnd: _FloatLike_co = ..., - scl: _FloatLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _FloatLike_co | _SeriesLikeFloat_co = [], + lbnd: _FloatLike_co = 0, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, ) -> _FloatArray: ... @overload def __call__( self, /, c: _ArrayLikeComplex_co, - m: SupportsIndex = ..., - k: _ComplexLike_co | _SeriesLikeComplex_co = ..., - lbnd: _ComplexLike_co = ..., - scl: _ComplexLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _ComplexLike_co | _SeriesLikeComplex_co = [], + lbnd: _ComplexLike_co = 0, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, ) -> _ComplexArray: ... @overload def __call__( self, /, c: _ArrayLikeCoef_co, - m: SupportsIndex = ..., - k: _CoefLike_co | _SeriesLikeCoef_co = ..., - lbnd: _CoefLike_co = ..., - scl: _CoefLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = [], + lbnd: _CoefLike_co = 0, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, ) -> _ObjectArray: ... @type_check_only -class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal(Protocol): @overload - def __call__( - self, - /, - x: _FloatLike_co, - r: _FloatLike_co, - tensor: bool = ..., - ) -> np.floating: ... + def __call__(self, /, x: _FloatLike_co, c: _SeriesLikeFloat_co, tensor: bool = True) -> np.floating: ... @overload - def __call__( - self, - /, - x: _NumberLike_co, - r: _NumberLike_co, - tensor: bool = ..., - ) -> np.complexfloating: ... + def __call__(self, /, x: _NumberLike_co, c: _SeriesLikeComplex_co, tensor: bool = True) -> np.complexfloating: ... @overload - def __call__( - self, - /, - x: _FloatLike_co | _ArrayLikeFloat_co, - r: _ArrayLikeFloat_co, - tensor: bool = ..., - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co, tensor: bool = True) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _NumberLike_co | _ArrayLikeComplex_co, - r: _ArrayLikeComplex_co, - tensor: bool = ..., - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co, tensor: bool = True) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _CoefLike_co | _ArrayLikeCoef_co, - r: _ArrayLikeCoef_co, - tensor: bool = ..., - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co, tensor: bool = True) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: _CoefLike_co, - r: _CoefLike_co, - tensor: bool = ..., - ) -> _SupportsCoefOps[Any]: ... + def __call__(self, /, x: _CoefLike_co, c: _SeriesLikeObject_co, tensor: bool = True) -> _SupportsCoefOps[Any]: ... @type_check_only -class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal2D(Protocol): @overload - def __call__( - self, - /, - x: _FloatLike_co, - c: _SeriesLikeFloat_co, - tensor: bool = ..., - ) -> np.floating: ... + def __call__(self, /, x: _FloatLike_co, y: _FloatLike_co, c: _SeriesLikeFloat_co) -> np.floating: ... @overload - def __call__( - self, - /, - x: _NumberLike_co, - c: _SeriesLikeComplex_co, - tensor: bool = ..., - ) -> np.complexfloating: ... + def __call__(self, /, x: _NumberLike_co, y: _NumberLike_co, c: _SeriesLikeComplex_co) -> np.complexfloating: ... 
@overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - c: _ArrayLikeFloat_co, - tensor: bool = ..., - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - c: _ArrayLikeComplex_co, - tensor: bool = ..., - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - c: _ArrayLikeCoef_co, - tensor: bool = ..., - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: _CoefLike_co, - c: _SeriesLikeObject_co, - tensor: bool = ..., - ) -> _SupportsCoefOps[Any]: ... + def __call__(self, /, x: _CoefLike_co, y: _CoefLike_co, c: _SeriesLikeCoef_co) -> _SupportsCoefOps[Any]: ... @type_check_only -class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - x: _FloatLike_co, - y: _FloatLike_co, - c: _SeriesLikeFloat_co, - ) -> np.floating: ... - @overload - def __call__( - self, - /, - x: _NumberLike_co, - y: _NumberLike_co, - c: _SeriesLikeComplex_co, - ) -> np.complexfloating: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - c: _ArrayLikeFloat_co, - ) -> _FloatArray: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - c: _ArrayLikeComplex_co, - ) -> _ComplexArray: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - y: _ArrayLikeCoef_co, - c: _ArrayLikeCoef_co, - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - x: _CoefLike_co, - y: _CoefLike_co, - c: _SeriesLikeCoef_co, - ) -> _SupportsCoefOps[Any]: ... - -@type_check_only -class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal3D(Protocol): @overload def __call__( self, @@ -463,7 +245,7 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): x: _FloatLike_co, y: _FloatLike_co, z: _FloatLike_co, - c: _SeriesLikeFloat_co + c: _SeriesLikeFloat_co, ) -> np.floating: ... @overload def __call__( @@ -511,132 +293,30 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): c: _SeriesLikeCoef_co, ) -> _SupportsCoefOps[Any]: ... -_AnyValF: TypeAlias = Callable[ - [npt.ArrayLike, npt.ArrayLike, bool], - _CoefArray, -] - @type_check_only -class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeFloat_co, - /, - *args: _FloatLike_co, - ) -> np.floating: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeComplex_co, - /, - *args: _NumberLike_co, - ) -> np.complexfloating: ... +class _FuncVander(Protocol): @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeFloat_co, - /, - *args: _ArrayLikeFloat_co, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, deg: SupportsIndex) -> _FloatArray: ... @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeComplex_co, - /, - *args: _ArrayLikeComplex_co, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, deg: SupportsIndex) -> _ComplexArray: ... @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeObject_co, - /, - *args: _CoefObjectLike_co, - ) -> _SupportsCoefOps[Any]: ... 
+ def __call__(self, /, x: _ArrayLikeCoef_co, deg: SupportsIndex) -> _ObjectArray: ... @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeCoef_co, - /, - *args: _ArrayLikeCoef_co, - ) -> _ObjectArray: ... + def __call__(self, /, x: npt.ArrayLike, deg: SupportsIndex) -> _CoefArray: ... @type_check_only -class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander2D(Protocol): @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - deg: SupportsIndex, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: _AnyDegrees) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - deg: SupportsIndex, - ) -> _ComplexArray: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - deg: SupportsIndex, - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - x: npt.ArrayLike, - deg: SupportsIndex, - ) -> _CoefArray: ... - -_AnyDegrees: TypeAlias = Sequence[SupportsIndex] - -@type_check_only -class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - deg: _AnyDegrees, - ) -> _FloatArray: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - deg: _AnyDegrees, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: _AnyDegrees) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - y: _ArrayLikeCoef_co, - deg: _AnyDegrees, - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, deg: _AnyDegrees) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: npt.ArrayLike, - y: npt.ArrayLike, - deg: _AnyDegrees, - ) -> _CoefArray: ... + def __call__(self, /, x: npt.ArrayLike, y: npt.ArrayLike, deg: _AnyDegrees) -> _CoefArray: ... @type_check_only -class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander3D(Protocol): @overload def __call__( self, @@ -674,53 +354,8 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): deg: _AnyDegrees, ) -> _CoefArray: ... -# keep in sync with the broadest overload of `._FuncVander` -_AnyFuncVander: TypeAlias = Callable[ - [npt.ArrayLike, SupportsIndex], - _CoefArray, -] - @type_check_only -class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[_ArrayLikeFloat_co], - degrees: Sequence[SupportsIndex], - ) -> _FloatArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[_ArrayLikeComplex_co], - degrees: Sequence[SupportsIndex], - ) -> _ComplexArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[ - _ArrayLikeObject_co | _ArrayLikeComplex_co, - ], - degrees: Sequence[SupportsIndex], - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[npt.ArrayLike], - degrees: Sequence[SupportsIndex], - ) -> _CoefArray: ... 
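The `_FuncVanderND` protocol removed above is superseded by direct overloads on `_vander_nd`/`_vander_nd_flat` in the `polyutils.pyi` hunk further down. The contract both spellings describe is the usual flattened-Vandermonde identity, sketched here with public API only (shapes and values chosen arbitrarily):

    import numpy as np
    from numpy.polynomial import polynomial as P

    rng = np.random.default_rng(0)
    x, y = rng.random(5), rng.random(5)
    c = rng.random((3, 4))                 # c[i, j] multiplies x**i * y**j
    V = P.polyvander2d(x, y, [2, 3])       # shape (5, (2+1)*(3+1)) == (5, 12)
    print(np.allclose(V @ c.ravel(), P.polyval2d(x, y, c)))  # True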
- -_FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] - -@type_check_only -class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): +class _FuncFit(Protocol): @overload def __call__( self, @@ -729,7 +364,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): y: _ArrayLikeFloat_co, deg: int | _SeriesLikeInt_co, rcond: float | None = ..., - full: Literal[False] = ..., + full: Literal[False] = False, w: _SeriesLikeFloat_co | None = ..., ) -> _FloatArray: ... @overload @@ -755,7 +390,6 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): full: Literal[True], w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_FloatArray, _FullFitResult]: ... - @overload def __call__( self, @@ -764,7 +398,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): y: _ArrayLikeComplex_co, deg: int | _SeriesLikeInt_co, rcond: float | None = ..., - full: Literal[False] = ..., + full: Literal[False] = False, w: _SeriesLikeFloat_co | None = ..., ) -> _ComplexArray: ... @overload @@ -790,7 +424,6 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): full: Literal[True], w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_ComplexArray, _FullFitResult]: ... - @overload def __call__( self, @@ -799,7 +432,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, rcond: float | None = ..., - full: Literal[False] = ..., + full: Literal[False] = False, w: _SeriesLikeFloat_co | None = ..., ) -> _ObjectArray: ... @overload @@ -827,66 +460,32 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): ) -> tuple[_ObjectArray, _FullFitResult]: ... @type_check_only -class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncRoots(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - ) -> _Series[np.float64]: ... + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Series[np.float64]: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - ) -> _Series[np.complex128]: ... + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Series[np.complex128]: ... @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... -_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] - @type_check_only -class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): +class _FuncCompanion(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - ) -> _Companion[np.float64]: ... + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Companion[np.float64]: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - ) -> _Companion[np.complex128]: ... + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Companion[np.complex128]: ... @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... @type_check_only -class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): - def __call__( - self, - /, - deg: SupportsIndex, - ) -> _Tuple2[_Series[np.float64]]: ... +class _FuncGauss(Protocol): + def __call__(self, /, deg: SupportsIndex) -> _Tuple2[_Series[np.float64]]: ... @type_check_only -class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): +class _FuncWeight(Protocol): @overload - def __call__( - self, - /, - c: _ArrayLikeFloat_co, - ) -> npt.NDArray[np.float64]: ... + def __call__(self, /, x: _ArrayLikeFloat_co) -> npt.NDArray[np.float64]: ... @overload - def __call__( - self, - /, - c: _ArrayLikeComplex_co, - ) -> npt.NDArray[np.complex128]: ... + def __call__(self, /, x: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128]: ... 
@overload - def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... - -@type_check_only -class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): - def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... + def __call__(self, /, x: _ArrayLikeCoef_co) -> _ObjectArray: ... diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 58fce6046287..55b48b905848 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -108,8 +108,6 @@ """ # noqa: E501 import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -936,7 +934,7 @@ def chebder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1059,7 +1057,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1721,7 +1719,7 @@ def chebroots(c): # rotated companion matrix reduces error m = chebcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index ec342df0f9d1..157b0e5d0f46 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,6 +1,6 @@ +from _typeshed import ConvertibleToInt from collections.abc import Callable, Iterable -from typing import Any, Concatenate, Final, Self, TypeVar, overload -from typing import Literal as L +from typing import Any, ClassVar, Concatenate, Final, Literal as L, Self, overload import numpy as np import numpy.typing as npt @@ -21,13 +21,11 @@ from ._polytypes import ( _FuncLine, _FuncPoly2Ortho, _FuncPow, - _FuncPts, _FuncRoots, _FuncUnOp, _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -74,96 +72,90 @@ __all__ = [ "chebinterpolate", ] -_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) -def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_mul( - z1: npt.NDArray[_NumberOrObjectT], - z2: npt.NDArray[_NumberOrObjectT], -) -> _Series[_NumberOrObjectT]: ... -def _zseries_div( - z1: npt.NDArray[_NumberOrObjectT], - z2: npt.NDArray[_NumberOrObjectT], -) -> _Series[_NumberOrObjectT]: ... -def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +### -poly2cheb: _FuncPoly2Ortho[L["poly2cheb"]] -cheb2poly: _FuncUnOp[L["cheb2poly"]] +def _cseries_to_zseries[ScalarT: np.number | np.object_](c: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_to_cseries[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_mul[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_div[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... 
+def _zseries_der[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_int[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... -chebdomain: Final[_Array2[np.float64]] -chebzero: Final[_Array1[np.int_]] -chebone: Final[_Array1[np.int_]] -chebx: Final[_Array2[np.int_]] +poly2cheb: Final[_FuncPoly2Ortho] = ... +cheb2poly: Final[_FuncUnOp] = ... -chebline: _FuncLine[L["chebline"]] -chebfromroots: _FuncFromRoots[L["chebfromroots"]] -chebadd: _FuncBinOp[L["chebadd"]] -chebsub: _FuncBinOp[L["chebsub"]] -chebmulx: _FuncUnOp[L["chebmulx"]] -chebmul: _FuncBinOp[L["chebmul"]] -chebdiv: _FuncBinOp[L["chebdiv"]] -chebpow: _FuncPow[L["chebpow"]] -chebder: _FuncDer[L["chebder"]] -chebint: _FuncInteg[L["chebint"]] -chebval: _FuncVal[L["chebval"]] -chebval2d: _FuncVal2D[L["chebval2d"]] -chebval3d: _FuncVal3D[L["chebval3d"]] -chebvalfromroots: _FuncValFromRoots[L["chebvalfromroots"]] -chebgrid2d: _FuncVal2D[L["chebgrid2d"]] -chebgrid3d: _FuncVal3D[L["chebgrid3d"]] -chebvander: _FuncVander[L["chebvander"]] -chebvander2d: _FuncVander2D[L["chebvander2d"]] -chebvander3d: _FuncVander3D[L["chebvander3d"]] -chebfit: _FuncFit[L["chebfit"]] -chebcompanion: _FuncCompanion[L["chebcompanion"]] -chebroots: _FuncRoots[L["chebroots"]] -chebgauss: _FuncGauss[L["chebgauss"]] -chebweight: _FuncWeight[L["chebweight"]] -chebpts1: _FuncPts[L["chebpts1"]] -chebpts2: _FuncPts[L["chebpts2"]] +chebdomain: Final[_Array2[np.float64]] = ... +chebzero: Final[_Array1[np.int_]] = ... +chebone: Final[_Array1[np.int_]] = ... +chebx: Final[_Array2[np.int_]] = ... -# keep in sync with `Chebyshev.interpolate` -_RT = TypeVar("_RT", bound=np.number | np.bool | np.object_) +chebline: Final[_FuncLine] = ... +chebfromroots: Final[_FuncFromRoots] = ... +chebadd: Final[_FuncBinOp] = ... +chebsub: Final[_FuncBinOp] = ... +chebmulx: Final[_FuncUnOp] = ... +chebmul: Final[_FuncBinOp] = ... +chebdiv: Final[_FuncBinOp] = ... +chebpow: Final[_FuncPow] = ... +chebder: Final[_FuncDer] = ... +chebint: Final[_FuncInteg] = ... +chebval: Final[_FuncVal] = ... +chebval2d: Final[_FuncVal2D] = ... +chebval3d: Final[_FuncVal3D] = ... +chebgrid2d: Final[_FuncVal2D] = ... +chebgrid3d: Final[_FuncVal3D] = ... +chebvander: Final[_FuncVander] = ... +chebvander2d: Final[_FuncVander2D] = ... +chebvander3d: Final[_FuncVander3D] = ... +chebfit: Final[_FuncFit] = ... +chebcompanion: Final[_FuncCompanion] = ... +chebroots: Final[_FuncRoots] = ... +chebgauss: Final[_FuncGauss] = ... +chebweight: Final[_FuncWeight] = ... +def chebpts1(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +def chebpts2(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... + +# keep in sync with `Chebyshev.interpolate` (minus `domain` parameter) @overload def chebinterpolate( func: np.ufunc, deg: _IntLike_co, - args: tuple[()] = ..., + args: tuple[()] = (), ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload -def chebinterpolate( - func: Callable[[npt.NDArray[np.float64]], _RT], +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[[npt.NDArray[np.float64]], CoefScalarT], deg: _IntLike_co, - args: tuple[()] = ..., -) -> npt.NDArray[_RT]: ... + args: tuple[()] = (), +) -> npt.NDArray[CoefScalarT]: ... 
@overload -def chebinterpolate( - func: Callable[Concatenate[npt.NDArray[np.float64], ...], _RT], +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], CoefScalarT], deg: _IntLike_co, args: Iterable[Any], -) -> npt.NDArray[_RT]: ... +) -> npt.NDArray[CoefScalarT]: ... class Chebyshev(ABCPolyBase[L["T"]]): + basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + @overload @classmethod def interpolate( cls, func: Callable[[npt.NDArray[np.float64]], _CoefSeries], deg: _IntLike_co, - domain: _SeriesLikeCoef_co | None = ..., - args: tuple[()] = ..., + domain: _SeriesLikeCoef_co | None = None, + args: tuple[()] = (), ) -> Self: ... @overload @classmethod def interpolate( cls, - func: Callable[ - Concatenate[npt.NDArray[np.float64], ...], - _CoefSeries, - ], + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], deg: _IntLike_co, - domain: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, *, args: Iterable[Any], ) -> Self: ... @@ -171,10 +163,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): @classmethod def interpolate( cls, - func: Callable[ - Concatenate[npt.NDArray[np.float64], ...], - _CoefSeries, - ], + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], deg: _IntLike_co, domain: _SeriesLikeCoef_co | None, args: Iterable[Any], diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 47e1dfc05b4b..c6007d19df7f 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -655,7 +653,7 @@ def hermder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -772,7 +770,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -798,7 +796,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermval(x, c, tensor=True): """ - Evaluate an Hermite series at points x. + Evaluate a Hermite series at points x. If `c` is of length ``n + 1``, this function returns the value: @@ -1441,7 +1439,7 @@ def hermcompanion(c): """Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Hermite basis polynomial. This provides + symmetric when `c` is a Hermite basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. 
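The `hermcompanion` docstring above is the reason `hermgauss` can call `numpy.linalg.eigvalsh`: the scaled companion matrix of a Hermite basis polynomial is symmetric, so its eigenvalues are real. A hedged illustration, using `H_3` as an arbitrary basis polynomial:

    import numpy as np
    from numpy.polynomial import hermite as H

    c = np.array([0.0, 0.0, 0.0, 1.0])   # coefficients selecting the basis polynomial H_3
    m = H.hermcompanion(c)               # scaled companion matrix
    print(np.allclose(m, m.T))           # True: symmetric, so eigvalsh applies
    roots = np.linalg.eigvalsh(m)        # real eigenvalues are the roots of H_3
    print(np.allclose(np.sort(roots), np.sort(H.hermroots(c))))  # True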
@@ -1543,7 +1541,7 @@ def hermroots(c): # rotated companion matrix reduces error m = hermcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1636,7 +1634,7 @@ def hermgauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1], dtype=np.float64) m = hermcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_n(x, ideg) @@ -1695,7 +1693,7 @@ def hermweight(x): # class Hermite(ABCPolyBase): - """An Hermite series class. + """A Hermite series class. The Hermite class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index f7d907c1b39d..60f4af5a1fd7 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,7 +1,7 @@ -from typing import Any, Final, TypeVar -from typing import Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np +from numpy._typing import _Shape from ._polybase import ABCPolyBase from ._polytypes import ( @@ -22,7 +22,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -64,44 +63,45 @@ __all__ = [ "hermweight", ] -poly2herm: _FuncPoly2Ortho[L["poly2herm"]] -herm2poly: _FuncUnOp[L["herm2poly"]] +poly2herm: Final[_FuncPoly2Ortho] = ... +herm2poly: Final[_FuncUnOp] = ... -hermdomain: Final[_Array2[np.float64]] -hermzero: Final[_Array1[np.int_]] -hermone: Final[_Array1[np.int_]] -hermx: Final[_Array2[np.int_]] +hermdomain: Final[_Array2[np.float64]] = ... +hermzero: Final[_Array1[np.int_]] = ... +hermone: Final[_Array1[np.int_]] = ... +hermx: Final[_Array2[np.int_]] = ... -hermline: _FuncLine[L["hermline"]] -hermfromroots: _FuncFromRoots[L["hermfromroots"]] -hermadd: _FuncBinOp[L["hermadd"]] -hermsub: _FuncBinOp[L["hermsub"]] -hermmulx: _FuncUnOp[L["hermmulx"]] -hermmul: _FuncBinOp[L["hermmul"]] -hermdiv: _FuncBinOp[L["hermdiv"]] -hermpow: _FuncPow[L["hermpow"]] -hermder: _FuncDer[L["hermder"]] -hermint: _FuncInteg[L["hermint"]] -hermval: _FuncVal[L["hermval"]] -hermval2d: _FuncVal2D[L["hermval2d"]] -hermval3d: _FuncVal3D[L["hermval3d"]] -hermvalfromroots: _FuncValFromRoots[L["hermvalfromroots"]] -hermgrid2d: _FuncVal2D[L["hermgrid2d"]] -hermgrid3d: _FuncVal3D[L["hermgrid3d"]] -hermvander: _FuncVander[L["hermvander"]] -hermvander2d: _FuncVander2D[L["hermvander2d"]] -hermvander3d: _FuncVander3D[L["hermvander3d"]] -hermfit: _FuncFit[L["hermfit"]] -hermcompanion: _FuncCompanion[L["hermcompanion"]] -hermroots: _FuncRoots[L["hermroots"]] +hermline: Final[_FuncLine] = ... +hermfromroots: Final[_FuncFromRoots] = ... +hermadd: Final[_FuncBinOp] = ... +hermsub: Final[_FuncBinOp] = ... +hermmulx: Final[_FuncUnOp] = ... +hermmul: Final[_FuncBinOp] = ... +hermdiv: Final[_FuncBinOp] = ... +hermpow: Final[_FuncPow] = ... +hermder: Final[_FuncDer] = ... +hermint: Final[_FuncInteg] = ... +hermval: Final[_FuncVal] = ... +hermval2d: Final[_FuncVal2D] = ... +hermval3d: Final[_FuncVal3D] = ... +hermgrid2d: Final[_FuncVal2D] = ... +hermgrid3d: Final[_FuncVal3D] = ... +hermvander: Final[_FuncVander] = ... +hermvander2d: Final[_FuncVander2D] = ... +hermvander3d: Final[_FuncVander3D] = ... +hermfit: Final[_FuncFit] = ... +hermcompanion: Final[_FuncCompanion] = ... +hermroots: Final[_FuncRoots] = ... 
-_ND = TypeVar("_ND", bound=Any) -def _normed_hermite_n( - x: np.ndarray[_ND, np.dtype[np.float64]], - n: int | np.intp, -) -> np.ndarray[_ND, np.dtype[np.float64]]: ... +def _normed_hermite_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... -hermgauss: _FuncGauss[L["hermgauss"]] -hermweight: _FuncWeight[L["hermweight"]] +hermgauss: Final[_FuncGauss] = ... +hermweight: Final[_FuncWeight] = ... -class Hermite(ABCPolyBase[L["H"]]): ... +class Hermite(ABCPolyBase[L["H"]]): + basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index d30fc1b5aa14..f5d82aa543b9 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -653,7 +651,7 @@ def hermeder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -770,7 +768,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -796,7 +794,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermeval(x, c, tensor=True): """ - Evaluate an HermiteE series at points x. + Evaluate a HermiteE series at points x. If `c` is of length ``n + 1``, this function returns the value: @@ -1367,7 +1365,7 @@ def hermecompanion(c): Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an HermiteE basis polynomial. This provides + symmetric when `c` is a HermiteE basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1461,7 +1459,7 @@ def hermeroots(c): # rotated companion matrix reduces error m = hermecompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1548,7 +1546,7 @@ def hermegauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1]) m = hermecompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_e_n(x, ideg) @@ -1597,7 +1595,7 @@ def hermeweight(x): # class HermiteE(ABCPolyBase): - """An HermiteE series class. + """A HermiteE series class. 
The HermiteE class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index e8013e66b62f..6997c8a381ef 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,7 +1,7 @@ -from typing import Any, Final, TypeVar -from typing import Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np +from numpy._typing import _Shape from ._polybase import ABCPolyBase from ._polytypes import ( @@ -22,7 +22,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -64,44 +63,45 @@ __all__ = [ "hermeweight", ] -poly2herme: _FuncPoly2Ortho[L["poly2herme"]] -herme2poly: _FuncUnOp[L["herme2poly"]] +poly2herme: Final[_FuncPoly2Ortho] = ... +herme2poly: Final[_FuncUnOp] = ... -hermedomain: Final[_Array2[np.float64]] -hermezero: Final[_Array1[np.int_]] -hermeone: Final[_Array1[np.int_]] -hermex: Final[_Array2[np.int_]] +hermedomain: Final[_Array2[np.float64]] = ... +hermezero: Final[_Array1[np.int_]] = ... +hermeone: Final[_Array1[np.int_]] = ... +hermex: Final[_Array2[np.int_]] = ... -hermeline: _FuncLine[L["hermeline"]] -hermefromroots: _FuncFromRoots[L["hermefromroots"]] -hermeadd: _FuncBinOp[L["hermeadd"]] -hermesub: _FuncBinOp[L["hermesub"]] -hermemulx: _FuncUnOp[L["hermemulx"]] -hermemul: _FuncBinOp[L["hermemul"]] -hermediv: _FuncBinOp[L["hermediv"]] -hermepow: _FuncPow[L["hermepow"]] -hermeder: _FuncDer[L["hermeder"]] -hermeint: _FuncInteg[L["hermeint"]] -hermeval: _FuncVal[L["hermeval"]] -hermeval2d: _FuncVal2D[L["hermeval2d"]] -hermeval3d: _FuncVal3D[L["hermeval3d"]] -hermevalfromroots: _FuncValFromRoots[L["hermevalfromroots"]] -hermegrid2d: _FuncVal2D[L["hermegrid2d"]] -hermegrid3d: _FuncVal3D[L["hermegrid3d"]] -hermevander: _FuncVander[L["hermevander"]] -hermevander2d: _FuncVander2D[L["hermevander2d"]] -hermevander3d: _FuncVander3D[L["hermevander3d"]] -hermefit: _FuncFit[L["hermefit"]] -hermecompanion: _FuncCompanion[L["hermecompanion"]] -hermeroots: _FuncRoots[L["hermeroots"]] +hermeline: Final[_FuncLine] = ... +hermefromroots: Final[_FuncFromRoots] = ... +hermeadd: Final[_FuncBinOp] = ... +hermesub: Final[_FuncBinOp] = ... +hermemulx: Final[_FuncUnOp] = ... +hermemul: Final[_FuncBinOp] = ... +hermediv: Final[_FuncBinOp] = ... +hermepow: Final[_FuncPow] = ... +hermeder: Final[_FuncDer] = ... +hermeint: Final[_FuncInteg] = ... +hermeval: Final[_FuncVal] = ... +hermeval2d: Final[_FuncVal2D] = ... +hermeval3d: Final[_FuncVal3D] = ... +hermegrid2d: Final[_FuncVal2D] = ... +hermegrid3d: Final[_FuncVal3D] = ... +hermevander: Final[_FuncVander] = ... +hermevander2d: Final[_FuncVander2D] = ... +hermevander3d: Final[_FuncVander3D] = ... +hermefit: Final[_FuncFit] = ... +hermecompanion: Final[_FuncCompanion] = ... +hermeroots: Final[_FuncRoots] = ... -_ND = TypeVar("_ND", bound=Any) -def _normed_hermite_e_n( - x: np.ndarray[_ND, np.dtype[np.float64]], - n: int | np.intp, -) -> np.ndarray[_ND, np.dtype[np.float64]]: ... +def _normed_hermite_e_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... -hermegauss: _FuncGauss[L["hermegauss"]] -hermeweight: _FuncWeight[L["hermeweight"]] +hermegauss: Final[_FuncGauss] = ... +hermeweight: Final[_FuncWeight] = ... -class HermiteE(ABCPolyBase[L["He"]]): ... 
+class HermiteE(ABCPolyBase[L["He"]]): + basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 38eb5a80b200..b1d87bf6d035 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -650,7 +648,7 @@ def lagder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -770,7 +768,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1185,7 +1183,7 @@ def lagvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares @@ -1525,7 +1523,7 @@ def lagroots(c): # rotated companion matrix reduces error m = lagcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1577,7 +1575,7 @@ def laggauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1]) m = lagcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = lagval(x, c) diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 6f67257a607c..8b70b899ed59 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np @@ -22,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -64,37 +62,39 @@ __all__ = [ "lagweight", ] -poly2lag: _FuncPoly2Ortho[L["poly2lag"]] -lag2poly: _FuncUnOp[L["lag2poly"]] +poly2lag: Final[_FuncPoly2Ortho] = ... +lag2poly: Final[_FuncUnOp] = ... -lagdomain: Final[_Array2[np.float64]] -lagzero: Final[_Array1[np.int_]] -lagone: Final[_Array1[np.int_]] -lagx: Final[_Array2[np.int_]] +lagdomain: Final[_Array2[np.float64]] = ... +lagzero: Final[_Array1[np.int_]] = ... +lagone: Final[_Array1[np.int_]] = ... +lagx: Final[_Array2[np.int_]] = ... 
-lagline: _FuncLine[L["lagline"]] -lagfromroots: _FuncFromRoots[L["lagfromroots"]] -lagadd: _FuncBinOp[L["lagadd"]] -lagsub: _FuncBinOp[L["lagsub"]] -lagmulx: _FuncUnOp[L["lagmulx"]] -lagmul: _FuncBinOp[L["lagmul"]] -lagdiv: _FuncBinOp[L["lagdiv"]] -lagpow: _FuncPow[L["lagpow"]] -lagder: _FuncDer[L["lagder"]] -lagint: _FuncInteg[L["lagint"]] -lagval: _FuncVal[L["lagval"]] -lagval2d: _FuncVal2D[L["lagval2d"]] -lagval3d: _FuncVal3D[L["lagval3d"]] -lagvalfromroots: _FuncValFromRoots[L["lagvalfromroots"]] -laggrid2d: _FuncVal2D[L["laggrid2d"]] -laggrid3d: _FuncVal3D[L["laggrid3d"]] -lagvander: _FuncVander[L["lagvander"]] -lagvander2d: _FuncVander2D[L["lagvander2d"]] -lagvander3d: _FuncVander3D[L["lagvander3d"]] -lagfit: _FuncFit[L["lagfit"]] -lagcompanion: _FuncCompanion[L["lagcompanion"]] -lagroots: _FuncRoots[L["lagroots"]] -laggauss: _FuncGauss[L["laggauss"]] -lagweight: _FuncWeight[L["lagweight"]] +lagline: Final[_FuncLine] = ... +lagfromroots: Final[_FuncFromRoots] = ... +lagadd: Final[_FuncBinOp] = ... +lagsub: Final[_FuncBinOp] = ... +lagmulx: Final[_FuncUnOp] = ... +lagmul: Final[_FuncBinOp] = ... +lagdiv: Final[_FuncBinOp] = ... +lagpow: Final[_FuncPow] = ... +lagder: Final[_FuncDer] = ... +lagint: Final[_FuncInteg] = ... +lagval: Final[_FuncVal] = ... +lagval2d: Final[_FuncVal2D] = ... +lagval3d: Final[_FuncVal3D] = ... +laggrid2d: Final[_FuncVal2D] = ... +laggrid3d: Final[_FuncVal3D] = ... +lagvander: Final[_FuncVander] = ... +lagvander2d: Final[_FuncVander2D] = ... +lagvander3d: Final[_FuncVander3D] = ... +lagfit: Final[_FuncFit] = ... +lagcompanion: Final[_FuncCompanion] = ... +lagroots: Final[_FuncRoots] = ... +laggauss: Final[_FuncGauss] = ... +lagweight: Final[_FuncWeight] = ... -class Laguerre(ABCPolyBase[L["L"]]): ... +class Laguerre(ABCPolyBase[L["L"]]): + basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index b43bdfa83034..237e340cbf45 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -80,8 +80,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -676,7 +674,7 @@ def legder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -799,7 +797,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1164,7 +1162,7 @@ def legvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares @@ -1464,7 +1462,7 @@ def legroots(c): # rotated companion matrix reduces error m = legcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1510,7 +1508,7 @@ def leggauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1]) m = legcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = legval(x, c) diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 35ea2ffd2bf2..53f8f7c210fa 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np @@ -22,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -64,37 +62,39 @@ __all__ = [ "legweight", ] -poly2leg: _FuncPoly2Ortho[L["poly2leg"]] -leg2poly: _FuncUnOp[L["leg2poly"]] +poly2leg: Final[_FuncPoly2Ortho] = ... +leg2poly: Final[_FuncUnOp] = ... -legdomain: Final[_Array2[np.float64]] -legzero: Final[_Array1[np.int_]] -legone: Final[_Array1[np.int_]] -legx: Final[_Array2[np.int_]] +legdomain: Final[_Array2[np.float64]] = ... +legzero: Final[_Array1[np.int_]] = ... +legone: Final[_Array1[np.int_]] = ... +legx: Final[_Array2[np.int_]] = ... -legline: _FuncLine[L["legline"]] -legfromroots: _FuncFromRoots[L["legfromroots"]] -legadd: _FuncBinOp[L["legadd"]] -legsub: _FuncBinOp[L["legsub"]] -legmulx: _FuncUnOp[L["legmulx"]] -legmul: _FuncBinOp[L["legmul"]] -legdiv: _FuncBinOp[L["legdiv"]] -legpow: _FuncPow[L["legpow"]] -legder: _FuncDer[L["legder"]] -legint: _FuncInteg[L["legint"]] -legval: _FuncVal[L["legval"]] -legval2d: _FuncVal2D[L["legval2d"]] -legval3d: _FuncVal3D[L["legval3d"]] -legvalfromroots: _FuncValFromRoots[L["legvalfromroots"]] -leggrid2d: _FuncVal2D[L["leggrid2d"]] -leggrid3d: _FuncVal3D[L["leggrid3d"]] -legvander: _FuncVander[L["legvander"]] -legvander2d: _FuncVander2D[L["legvander2d"]] -legvander3d: _FuncVander3D[L["legvander3d"]] -legfit: _FuncFit[L["legfit"]] -legcompanion: _FuncCompanion[L["legcompanion"]] -legroots: _FuncRoots[L["legroots"]] -leggauss: _FuncGauss[L["leggauss"]] -legweight: _FuncWeight[L["legweight"]] +legline: Final[_FuncLine] = ... +legfromroots: Final[_FuncFromRoots] = ... +legadd: Final[_FuncBinOp] = ... +legsub: Final[_FuncBinOp] = ... +legmulx: Final[_FuncUnOp] = ... +legmul: Final[_FuncBinOp] = ... +legdiv: Final[_FuncBinOp] = ... +legpow: Final[_FuncPow] = ... +legder: Final[_FuncDer] = ... +legint: Final[_FuncInteg] = ... +legval: Final[_FuncVal] = ... +legval2d: Final[_FuncVal2D] = ... +legval3d: Final[_FuncVal3D] = ... +leggrid2d: Final[_FuncVal2D] = ... +leggrid3d: Final[_FuncVal3D] = ... +legvander: Final[_FuncVander] = ... +legvander2d: Final[_FuncVander2D] = ... +legvander3d: Final[_FuncVander3D] = ... +legfit: Final[_FuncFit] = ... +legcompanion: Final[_FuncCompanion] = ... +legroots: Final[_FuncRoots] = ... +leggauss: Final[_FuncGauss] = ... +legweight: Final[_FuncWeight] = ... -class Legendre(ABCPolyBase[L["P"]]): ... +class Legendre(ABCPolyBase[L["P"]]): + basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 32b53b757a1c..e3823c89cd98 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -81,8 +81,7 @@ 'polycompanion'] import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index +from numpy._core.overrides import array_function_dispatch as _array_function_dispatch from . import polyutils as pu from ._polybase import ABCPolyBase @@ -522,7 +521,7 @@ def polyder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -636,7 +635,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -716,6 +715,10 @@ def polyval(x, c, tensor=True): ----- The evaluation uses Horner's method. + When using coefficients from polynomials created with ``Polynomial.fit()``, + use ``p(x)`` or ``polyval(x, p.convert().coef)`` to handle domain/window + scaling correctly, not ``polyval(x, p.coef)``. + Examples -------- >>> import numpy as np @@ -841,7 +844,13 @@ def polyvalfromroots(x, r, tensor=True): raise ValueError("x.ndim must be < r.ndim when tensor == False") return np.prod(x - r, axis=0) +def _polyval2d_dispatcher(x, y, c): + return (x, y, c) + +def _polygrid2d_dispatcher(x, y, c): + return (x, y, c) +@_array_function_dispatch(_polyval2d_dispatcher) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). @@ -893,7 +902,7 @@ def polyval2d(x, y, c): """ return pu._valnd(polyval, c, x, y) - +@_array_function_dispatch(_polygrid2d_dispatcher) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. @@ -1536,7 +1545,7 @@ def polyroots(c): return np.array([-c[0] / c[1]]) m = polycompanion(c) - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index b4c784492b50..86f288468a15 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,12 +1,19 @@ -from typing import Final -from typing import Literal as L +from typing import Any, ClassVar, Final, overload import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _FloatLike_co, + _NumberLike_co, +) from ._polybase import ABCPolyBase from ._polytypes import ( _Array1, _Array2, + _ArrayLikeCoef_co, _FuncBinOp, _FuncCompanion, _FuncDer, @@ -20,7 +27,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -58,32 +64,46 @@ __all__ = [ "polycompanion", ] -polydomain: Final[_Array2[np.float64]] -polyzero: Final[_Array1[np.int_]] -polyone: Final[_Array1[np.int_]] -polyx: Final[_Array2[np.int_]] +polydomain: Final[_Array2[np.float64]] = ... +polyzero: Final[_Array1[np.int_]] = ... +polyone: Final[_Array1[np.int_]] = ... +polyx: Final[_Array2[np.int_]] = ... + +polyline: Final[_FuncLine] = ... +polyfromroots: Final[_FuncFromRoots] = ... +polyadd: Final[_FuncBinOp] = ... +polysub: Final[_FuncBinOp] = ... 
+polymulx: Final[_FuncUnOp] = ... +polymul: Final[_FuncBinOp] = ... +polydiv: Final[_FuncBinOp] = ... +polypow: Final[_FuncPow] = ... +polyder: Final[_FuncDer] = ... +polyint: Final[_FuncInteg] = ... +polyval: Final[_FuncVal] = ... +polyval2d: Final[_FuncVal2D] = ... +polyval3d: Final[_FuncVal3D] = ... + +@overload +def polyvalfromroots(x: _FloatLike_co, r: _FloatLike_co, tensor: bool = True) -> np.float64 | Any: ... +@overload +def polyvalfromroots(x: _NumberLike_co, r: _NumberLike_co, tensor: bool = True) -> np.complex128 | Any: ... +@overload +def polyvalfromroots(x: _ArrayLikeFloat_co, r: _ArrayLikeFloat_co, tensor: bool = True) -> npt.NDArray[np.float64 | Any]: ... +@overload +def polyvalfromroots(x: _ArrayLikeNumber_co, r: _ArrayLikeNumber_co, tensor: bool = True) -> npt.NDArray[np.complex128 | Any]: ... +@overload +def polyvalfromroots(x: _ArrayLikeCoef_co, r: _ArrayLikeCoef_co, tensor: bool = True) -> npt.NDArray[np.object_ | Any]: ... -polyline: _FuncLine[L["Polyline"]] -polyfromroots: _FuncFromRoots[L["polyfromroots"]] -polyadd: _FuncBinOp[L["polyadd"]] -polysub: _FuncBinOp[L["polysub"]] -polymulx: _FuncUnOp[L["polymulx"]] -polymul: _FuncBinOp[L["polymul"]] -polydiv: _FuncBinOp[L["polydiv"]] -polypow: _FuncPow[L["polypow"]] -polyder: _FuncDer[L["polyder"]] -polyint: _FuncInteg[L["polyint"]] -polyval: _FuncVal[L["polyval"]] -polyval2d: _FuncVal2D[L["polyval2d"]] -polyval3d: _FuncVal3D[L["polyval3d"]] -polyvalfromroots: _FuncValFromRoots[L["polyvalfromroots"]] -polygrid2d: _FuncVal2D[L["polygrid2d"]] -polygrid3d: _FuncVal3D[L["polygrid3d"]] -polyvander: _FuncVander[L["polyvander"]] -polyvander2d: _FuncVander2D[L["polyvander2d"]] -polyvander3d: _FuncVander3D[L["polyvander3d"]] -polyfit: _FuncFit[L["polyfit"]] -polycompanion: _FuncCompanion[L["polycompanion"]] -polyroots: _FuncRoots[L["polyroots"]] +polygrid2d: Final[_FuncVal2D] = ... +polygrid3d: Final[_FuncVal3D] = ... +polyvander: Final[_FuncVander] = ... +polyvander2d: Final[_FuncVander2D] = ... +polyvander3d: Final[_FuncVander3D] = ... +polyfit: Final[_FuncFit] = ... +polycompanion: Final[_FuncCompanion] = ... +polyroots: Final[_FuncRoots] = ... -class Polynomial(ABCPolyBase[None]): ... +class Polynomial(ABCPolyBase[None]): + basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 18dc0a8d1d24..5e0e1af973ae 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -23,8 +23,6 @@ import warnings import numpy as np -from numpy._core.multiarray import dragon4_positional, dragon4_scientific -from numpy.exceptions import RankWarning __all__ = [ 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', @@ -661,7 +659,7 @@ def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" - warnings.warn(msg, RankWarning, stacklevel=2) + warnings.warn(msg, np.exceptions.RankWarning, stacklevel=2) if full: return c, [resids, rank, s, rcond] @@ -725,6 +723,8 @@ def _as_int(x, desc): def format_float(x, parens=False): + from numpy._core.multiarray import dragon4_positional, dragon4_scientific + if not np.issubdtype(type(x), np.floating): return str(x) diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index c627e16dca1d..fbaaf7d22880 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,18 +1,12 @@ from collections.abc import Callable, Iterable, Sequence -from typing import ( - Final, - Literal, - SupportsIndex, - TypeAlias, - TypeVar, - overload, -) +from typing import Final, Literal, Protocol, SupportsIndex, overload, type_check_only import numpy as np import numpy.typing as npt from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, + _ArrayLikeObject_co, _FloatLike_co, _NumberLike_co, ) @@ -29,210 +23,109 @@ from ._polytypes import ( _FloatArray, _FloatSeries, _FuncBinOp, - _FuncValND, - _FuncVanderND, _ObjectArray, _ObjectSeries, _SeriesLikeCoef_co, _SeriesLikeComplex_co, _SeriesLikeFloat_co, _SeriesLikeInt_co, + _SeriesLikeObject_co, _Tuple2, ) -__all__: Final[Sequence[str]] = [ - "as_series", - "format_float", - "getdomain", - "mapdomain", - "mapparms", - "trimcoef", - "trimseq", -] +__all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] -_AnyLineF: TypeAlias = Callable[ - [_CoefLike_co, _CoefLike_co], - _CoefArray, -] -_AnyMulF: TypeAlias = Callable[ - [npt.ArrayLike, npt.ArrayLike], - _CoefArray, -] -_AnyVanderF: TypeAlias = Callable[ - [npt.ArrayLike, SupportsIndex], - _CoefArray, -] +type _AnyLineF = Callable[[float, float], _CoefArray] +type _AnyMulF = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] +type _AnyVanderF = Callable[[np.ndarray, int], _CoefArray] +@type_check_only +class _ValFunc[T](Protocol): + def __call__(self, x: np.ndarray, c: T, /, *, tensor: bool = True) -> T: ... + +### + +@overload +def as_series(alist: npt.NDArray[np.integer] | _FloatArray, trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: _ComplexArray, trim: bool = True) -> list[_ComplexSeries]: ... @overload -def as_series( - alist: npt.NDArray[np.integer] | _FloatArray, - trim: bool = ..., -) -> list[_FloatSeries]: ... -@overload -def as_series( - alist: _ComplexArray, - trim: bool = ..., -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: _ObjectArray, - trim: bool = ..., -) -> list[_ObjectSeries]: ... -@overload -def as_series( # type: ignore[overload-overlap] - alist: Iterable[_FloatArray | npt.NDArray[np.integer]], - trim: bool = ..., -) -> list[_FloatSeries]: ... 
-@overload -def as_series( - alist: Iterable[_ComplexArray], - trim: bool = ..., -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: Iterable[_ObjectArray], - trim: bool = ..., -) -> list[_ObjectSeries]: ... -@overload -def as_series( # type: ignore[overload-overlap] - alist: Iterable[_SeriesLikeFloat_co | float], - trim: bool = ..., -) -> list[_FloatSeries]: ... -@overload -def as_series( - alist: Iterable[_SeriesLikeComplex_co | complex], - trim: bool = ..., -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: Iterable[_SeriesLikeCoef_co | object], - trim: bool = ..., -) -> list[_ObjectSeries]: ... +def as_series(alist: _ObjectArray, trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_FloatArray | npt.NDArray[np.integer]], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_ComplexArray], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_ObjectArray], trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeFloat_co | float], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = True) -> list[_ObjectSeries]: ... -_T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) -def trimseq(seq: _T_seq) -> _T_seq: ... +# +def trimseq[SeqT: _CoefArray | Sequence[_CoefLike_co]](seq: SeqT) -> SeqT: ... +# @overload -def trimcoef( # type: ignore[overload-overlap] - c: npt.NDArray[np.integer] | _FloatArray, - tol: _FloatLike_co = ..., -) -> _FloatSeries: ... +def trimcoef(c: npt.NDArray[np.integer] | _FloatArray, tol: _FloatLike_co = 0) -> _FloatSeries: ... @overload -def trimcoef( - c: _ComplexArray, - tol: _FloatLike_co = ..., -) -> _ComplexSeries: ... +def trimcoef(c: _ComplexArray, tol: _FloatLike_co = 0) -> _ComplexSeries: ... @overload -def trimcoef( - c: _ObjectArray, - tol: _FloatLike_co = ..., -) -> _ObjectSeries: ... +def trimcoef(c: _ObjectArray, tol: _FloatLike_co = 0) -> _ObjectSeries: ... @overload -def trimcoef( # type: ignore[overload-overlap] - c: _SeriesLikeFloat_co | float, - tol: _FloatLike_co = ..., -) -> _FloatSeries: ... +def trimcoef(c: _SeriesLikeFloat_co | float, tol: _FloatLike_co = 0) -> _FloatSeries: ... @overload -def trimcoef( - c: _SeriesLikeComplex_co | complex, - tol: _FloatLike_co = ..., -) -> _ComplexSeries: ... +def trimcoef(c: _SeriesLikeComplex_co | complex, tol: _FloatLike_co = 0) -> _ComplexSeries: ... @overload -def trimcoef( - c: _SeriesLikeCoef_co | object, - tol: _FloatLike_co = ..., -) -> _ObjectSeries: ... +def trimcoef(c: _SeriesLikeCoef_co | object, tol: _FloatLike_co = 0) -> _ObjectSeries: ... +# @overload -def getdomain( # type: ignore[overload-overlap] - x: _FloatArray | npt.NDArray[np.integer], -) -> _Array2[np.float64]: ... +def getdomain(x: _FloatArray | npt.NDArray[np.integer]) -> _Array2[np.float64]: ... @overload -def getdomain( - x: _ComplexArray, -) -> _Array2[np.complex128]: ... +def getdomain(x: _ComplexArray) -> _Array2[np.complex128]: ... @overload -def getdomain( - x: _ObjectArray, -) -> _Array2[np.object_]: ... +def getdomain(x: _ObjectArray) -> _Array2[np.object_]: ... @overload -def getdomain( # type: ignore[overload-overlap] - x: _SeriesLikeFloat_co | float, -) -> _Array2[np.float64]: ... 
+def getdomain(x: _SeriesLikeFloat_co | float) -> _Array2[np.float64]: ... @overload -def getdomain( - x: _SeriesLikeComplex_co | complex, -) -> _Array2[np.complex128]: ... +def getdomain(x: _SeriesLikeComplex_co | complex) -> _Array2[np.complex128]: ... @overload -def getdomain( - x: _SeriesLikeCoef_co | object, -) -> _Array2[np.object_]: ... +def getdomain(x: _SeriesLikeCoef_co | object) -> _Array2[np.object_]: ... +# @overload -def mapparms( # type: ignore[overload-overlap] - old: npt.NDArray[np.floating | np.integer], - new: npt.NDArray[np.floating | np.integer], -) -> _Tuple2[np.floating]: ... +def mapparms(old: npt.NDArray[np.floating | np.integer], new: npt.NDArray[np.floating | np.integer]) -> _Tuple2[np.floating]: ... @overload -def mapparms( - old: npt.NDArray[np.number], - new: npt.NDArray[np.number], -) -> _Tuple2[np.complexfloating]: ... +def mapparms(old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _Tuple2[np.complexfloating]: ... @overload -def mapparms( - old: npt.NDArray[np.object_ | np.number], - new: npt.NDArray[np.object_ | np.number], -) -> _Tuple2[object]: ... -@overload -def mapparms( # type: ignore[overload-overlap] - old: Sequence[float], - new: Sequence[float], -) -> _Tuple2[float]: ... -@overload -def mapparms( - old: Sequence[complex], - new: Sequence[complex], -) -> _Tuple2[complex]: ... -@overload -def mapparms( - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> _Tuple2[np.floating]: ... -@overload -def mapparms( - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> _Tuple2[np.complexfloating]: ... -@overload -def mapparms( - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> _Tuple2[object]: ... +def mapparms(old: npt.NDArray[np.object_ | np.number], new: npt.NDArray[np.object_ | np.number]) -> _Tuple2[object]: ... +@overload +def mapparms(old: Sequence[float], new: Sequence[float]) -> _Tuple2[float]: ... +@overload +def mapparms(old: Sequence[complex], new: Sequence[complex]) -> _Tuple2[complex]: ... +@overload +def mapparms(old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _Tuple2[np.floating]: ... +@overload +def mapparms(old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms(old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _Tuple2[object]: ... +# @overload -def mapdomain( # type: ignore[overload-overlap] - x: _FloatLike_co, - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> np.floating: ... +def mapdomain(x: _FloatLike_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> np.floating: ... @overload -def mapdomain( - x: _NumberLike_co, - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> np.complexfloating: ... +def mapdomain(x: _NumberLike_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> np.complexfloating: ... @overload -def mapdomain( # type: ignore[overload-overlap] +def mapdomain( x: npt.NDArray[np.floating | np.integer], old: npt.NDArray[np.floating | np.integer], new: npt.NDArray[np.floating | np.integer], ) -> _FloatSeries: ... @overload -def mapdomain( - x: npt.NDArray[np.number], - old: npt.NDArray[np.number], - new: npt.NDArray[np.number], -) -> _ComplexSeries: ... +def mapdomain(x: npt.NDArray[np.number], old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _ComplexSeries: ... @overload def mapdomain( x: npt.NDArray[np.object_ | np.number], @@ -240,137 +133,118 @@ def mapdomain( new: npt.NDArray[np.object_ | np.number], ) -> _ObjectSeries: ... 
 @overload
-def mapdomain(  # type: ignore[overload-overlap]
-    x: _SeriesLikeFloat_co,
-    old: _SeriesLikeFloat_co,
-    new: _SeriesLikeFloat_co,
-) -> _FloatSeries: ...
+def mapdomain(x: _SeriesLikeFloat_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _FloatSeries: ...
 @overload
-def mapdomain(
-    x: _SeriesLikeComplex_co,
-    old: _SeriesLikeComplex_co,
-    new: _SeriesLikeComplex_co,
-) -> _ComplexSeries: ...
+def mapdomain(x: _SeriesLikeComplex_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _ComplexSeries: ...
 @overload
-def mapdomain(
-    x: _SeriesLikeCoef_co,
-    old: _SeriesLikeCoef_co,
-    new: _SeriesLikeCoef_co,
-) -> _ObjectSeries: ...
+def mapdomain(x: _SeriesLikeCoef_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _ObjectSeries: ...
 @overload
-def mapdomain(
-    x: _CoefLike_co,
-    old: _SeriesLikeCoef_co,
-    new: _SeriesLikeCoef_co,
-) -> object: ...
+def mapdomain(x: _CoefLike_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> object: ...

-def _nth_slice(
-    i: SupportsIndex,
-    ndim: SupportsIndex,
-) -> tuple[slice | None, ...]: ...
+#
+def _nth_slice(i: SupportsIndex, ndim: SupportsIndex) -> tuple[slice | None, ...]: ...
+
+# keep in sync with `_vander_nd_flat`
+@overload
+def _vander_nd(
+    vander_fs: Sequence[_AnyVanderF],
+    points: Sequence[_ArrayLikeFloat_co],
+    degrees: Sequence[SupportsIndex],
+) -> _FloatArray: ...
+@overload
+def _vander_nd(
+    vander_fs: Sequence[_AnyVanderF],
+    points: Sequence[_ArrayLikeComplex_co],
+    degrees: Sequence[SupportsIndex],
+) -> _ComplexArray: ...
+@overload
+def _vander_nd(
+    vander_fs: Sequence[_AnyVanderF],
+    points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co],
+    degrees: Sequence[SupportsIndex],
+) -> _ObjectArray: ...
+@overload
+def _vander_nd(
+    vander_fs: Sequence[_AnyVanderF],
+    points: Sequence[npt.ArrayLike],
+    degrees: Sequence[SupportsIndex],
+) -> _CoefArray: ...

-_vander_nd: _FuncVanderND[Literal["_vander_nd"]]
-_vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]]
+# keep in sync with `_vander_nd`
+@overload
+def _vander_nd_flat(
+    vander_fs: Sequence[_AnyVanderF],
+    points: Sequence[_ArrayLikeFloat_co],
+    degrees: Sequence[SupportsIndex],
+) -> _FloatArray: ...
+@overload
+def _vander_nd_flat(
+    vander_fs: Sequence[_AnyVanderF],
+    points: Sequence[_ArrayLikeComplex_co],
+    degrees: Sequence[SupportsIndex],
+) -> _ComplexArray: ...
+@overload
+def _vander_nd_flat(
+    vander_fs: Sequence[_AnyVanderF],
+    points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co],
+    degrees: Sequence[SupportsIndex],
+) -> _ObjectArray: ...
+@overload
+def _vander_nd_flat(
+    vander_fs: Sequence[_AnyVanderF],
+    points: Sequence[npt.ArrayLike],
+    degrees: Sequence[SupportsIndex],
+) -> _CoefArray: ...

 # keep in sync with `._polytypes._FuncFromRoots`
 @overload
-def _fromroots(  # type: ignore[overload-overlap]
-    line_f: _AnyLineF,
-    mul_f: _AnyMulF,
-    roots: _SeriesLikeFloat_co,
-) -> _FloatSeries: ...
+def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeFloat_co) -> _FloatSeries: ...
 @overload
-def _fromroots(
-    line_f: _AnyLineF,
-    mul_f: _AnyMulF,
-    roots: _SeriesLikeComplex_co,
-) -> _ComplexSeries: ...
+def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ...
 @overload
-def _fromroots(
-    line_f: _AnyLineF,
-    mul_f: _AnyMulF,
-    roots: _SeriesLikeCoef_co,
-) -> _ObjectSeries: ...
+def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) -> _ObjectSeries: ...
 @overload
-def _fromroots(
-    line_f: _AnyLineF,
-    mul_f: _AnyMulF,
-    roots: _SeriesLikeCoef_co,
-) -> _CoefSeries: ...
+def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ...
+
+# keep in sync with `_gridnd`
+def _valnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ...

-_valnd: _FuncValND[Literal["_valnd"]]
-_gridnd: _FuncValND[Literal["_gridnd"]]
+# keep in sync with `_valnd`
+def _gridnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ...

 # keep in sync with `_polytypes._FuncBinOp`
 @overload
-def _div(  # type: ignore[overload-overlap]
-    mul_f: _AnyMulF,
-    c1: _SeriesLikeFloat_co,
-    c2: _SeriesLikeFloat_co,
-) -> _Tuple2[_FloatSeries]: ...
-@overload
-def _div(
-    mul_f: _AnyMulF,
-    c1: _SeriesLikeComplex_co,
-    c2: _SeriesLikeComplex_co,
-) -> _Tuple2[_ComplexSeries]: ...
-@overload
-def _div(
-    mul_f: _AnyMulF,
-    c1: _SeriesLikeCoef_co,
-    c2: _SeriesLikeCoef_co,
-) -> _Tuple2[_ObjectSeries]: ...
-@overload
-def _div(
-    mul_f: _AnyMulF,
-    c1: _SeriesLikeCoef_co,
-    c2: _SeriesLikeCoef_co,
-) -> _Tuple2[_CoefSeries]: ...
+def _div(mul_f: _AnyMulF, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _Tuple2[_FloatSeries]: ...
+@overload
+def _div(mul_f: _AnyMulF, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _Tuple2[_ComplexSeries]: ...
+@overload
+def _div(mul_f: _AnyMulF, c1: _SeriesLikeObject_co, c2: _SeriesLikeObject_co) -> _Tuple2[_ObjectSeries]: ...
+@overload
+def _div(mul_f: _AnyMulF, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _Tuple2[_CoefSeries]: ...

-_add: Final[_FuncBinOp]
-_sub: Final[_FuncBinOp]
+_add: Final[_FuncBinOp] = ...
+_sub: Final[_FuncBinOp] = ...

 # keep in sync with `_polytypes._FuncPow`
 @overload
-def _pow(  # type: ignore[overload-overlap]
-    mul_f: _AnyMulF,
-    c: _SeriesLikeFloat_co,
-    pow: _AnyInt,
-    maxpower: _AnyInt | None = ...,
-) -> _FloatSeries: ...
+def _pow(mul_f: _AnyMulF, c: _SeriesLikeFloat_co, pow: _AnyInt, maxpower: _AnyInt | None = ...) -> _FloatSeries: ...
 @overload
-def _pow(
-    mul_f: _AnyMulF,
-    c: _SeriesLikeComplex_co,
-    pow: _AnyInt,
-    maxpower: _AnyInt | None = ...,
-) -> _ComplexSeries: ...
-@overload
-def _pow(
-    mul_f: _AnyMulF,
-    c: _SeriesLikeCoef_co,
-    pow: _AnyInt,
-    maxpower: _AnyInt | None = ...,
-) -> _ObjectSeries: ...
+def _pow(mul_f: _AnyMulF, c: _SeriesLikeComplex_co, pow: _AnyInt, maxpower: _AnyInt | None = ...) -> _ComplexSeries: ...
+@overload
+def _pow(mul_f: _AnyMulF, c: _SeriesLikeObject_co, pow: _AnyInt, maxpower: _AnyInt | None = ...) -> _ObjectSeries: ...
 @overload
-def _pow(
-    mul_f: _AnyMulF,
-    c: _SeriesLikeCoef_co,
-    pow: _AnyInt,
-    maxpower: _AnyInt | None = ...,
-) -> _CoefSeries: ...
+def _pow(mul_f: _AnyMulF, c: _SeriesLikeCoef_co, pow: _AnyInt, maxpower: _AnyInt | None = ...) -> _CoefSeries: ...

 # keep in sync with `_polytypes._FuncFit`
 @overload
-def _fit(  # type: ignore[overload-overlap]
+def _fit(
     vander_f: _AnyVanderF,
     x: _SeriesLikeFloat_co,
     y: _ArrayLikeFloat_co,
     deg: _SeriesLikeInt_co,
-    domain: _SeriesLikeFloat_co | None = ...,
-    rcond: _FloatLike_co | None = ...,
-    full: Literal[False] = ...,
-    w: _SeriesLikeFloat_co | None = ...,
+    rcond: _FloatLike_co | None = None,
+    full: Literal[False] = False,
+    w: _SeriesLikeFloat_co | None = None,
 ) -> _FloatArray: ...
@overload def _fit( @@ -378,10 +252,9 @@ def _fit( x: _SeriesLikeComplex_co, y: _ArrayLikeComplex_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeComplex_co | None = ..., - rcond: _FloatLike_co | None = ..., - full: Literal[False] = ..., - w: _SeriesLikeComplex_co | None = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeComplex_co | None = None, ) -> _ComplexArray: ... @overload def _fit( @@ -389,10 +262,9 @@ def _fit( x: _SeriesLikeCoef_co, y: _ArrayLikeCoef_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co | None = ..., - full: Literal[False] = ..., - w: _SeriesLikeCoef_co | None = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, ) -> _CoefArray: ... @overload def _fit( @@ -400,11 +272,9 @@ def _fit( x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None, rcond: _FloatLike_co | None, full: Literal[True], - /, - w: _SeriesLikeCoef_co | None = ..., + w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... @overload def _fit( @@ -412,12 +282,14 @@ def _fit( x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co | None = ..., + rcond: _FloatLike_co | None = None, *, full: Literal[True], - w: _SeriesLikeCoef_co | None = ..., + w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... +# def _as_int(x: SupportsIndex, desc: str) -> int: ... -def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ... + +# +def format_float(x: _FloatLike_co, parens: bool = False) -> str: ... diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 2cead454631c..14777ac60375 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises def trim(x): diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index d10aafbda866..156dccf6ea88 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -18,12 +18,7 @@ Legendre, Polynomial, ) -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises # # fixtures diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 8bd3951f4241..a289ba0b50cc 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises H0 = np.array([1]) H1 = np.array([0, 2]) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 29f34f66380e..233dfb28254a 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ 
b/numpy/polynomial/tests/test_hermite_e.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises He0 = np.array([1]) He1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 6793b780416d..884f15a9fe8f 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) / 1 L1 = np.array([1, -1]) / 1 diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index d0ed7060cbe7..6c87f44ee707 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) L1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 27513fd682e8..4c924a758b06 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -6,9 +6,10 @@ from fractions import Fraction from functools import reduce +import pytest + import numpy as np import numpy.polynomial.polynomial as poly -import numpy.polynomial.polyutils as pu from numpy.testing import ( assert_, assert_almost_equal, @@ -16,7 +17,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -656,7 +656,7 @@ def test_fit_degenerate_domain(self): assert_equal(p.coef, [2.]) p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) assert_almost_equal(p.coef, [2.05]) - with assert_warns(pu.RankWarning): + with pytest.warns(np.exceptions.RankWarning): p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) def test_result_type(self): @@ -667,3 +667,25 @@ def test_result_type(self): arr = np.polydiv(1, np.float32(1)) assert_equal(arr[0].dtype, np.float64) + +class ArrayFunctionInterceptor: + def __init__(self): + self.called = False + + def __array_function__(self, func, types, args, kwargs): + self.called = True + return "intercepted" + +def test_polyval2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polyval2d(x, y, c) + assert result == "intercepted" + +def test_polygrid2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polygrid2d(x, y, c) + assert result == "intercepted" diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index 96e88b9de1fa..a6f5e3990b6b 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -3,12 +3,7 @@ """ import numpy as np import numpy.polynomial.polyutils as pu 
-from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises class TestMisc: diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index d3735e3b85f6..f7d0131c94a9 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -246,6 +246,7 @@ def test_linewidth_printoption(self, lw, tgt): assert_(len(line) < lw) +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_set_default_printoptions(): p = poly.Polynomial([1, 2, 3]) c = poly.Chebyshev([1, 2, 3]) @@ -259,6 +260,7 @@ def test_set_default_printoptions(): poly.set_default_printstyle('invalid_input') +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_complex_coefficients(): """Test both numpy and built-in complex.""" coefs = [0 + 1j, 1 + 1j, -2 + 2j, 3 + 0j] diff --git a/numpy/random/_common.pyi b/numpy/random/_common.pyi index b667fd1c82eb..417387612014 100644 --- a/numpy/random/_common.pyi +++ b/numpy/random/_common.pyi @@ -1,11 +1,12 @@ +from _typeshed import Incomplete from collections.abc import Callable -from typing import Any, NamedTuple, TypeAlias +from typing import NamedTuple import numpy as np -__all__: list[str] = ["interface"] +__all__ = ["interface"] -_CDataVoidPointer: TypeAlias = Any +type _CDataVoidPointer = Incomplete # currently not expressible class interface(NamedTuple): state_address: int diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index f8420b3951cc..1fc2f7a02e11 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -240,7 +240,6 @@ cdef np.ndarray int_to_array(object value, object name, object bits, object uint cdef validate_output_shape(iter_shape, np.ndarray output): cdef np.npy_intp *dims cdef np.npy_intp ndim, i - cdef bint error dims = np.PyArray_DIMS(output) ndim = np.PyArray_NDIM(output) output_shape = tuple((dims[i] for i in range(ndim))) @@ -296,7 +295,7 @@ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, o cdef double out_val cdef double *out_array_data cdef np.ndarray out_array - cdef np.npy_intp i, n + cdef np.npy_intp n if size is None and out is None: with lock: @@ -320,7 +319,7 @@ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, ob cdef float out_val cdef float *out_array_data cdef np.ndarray out_array - cdef np.npy_intp i, n + cdef np.npy_intp n if size is None and out is None: with lock: @@ -427,7 +426,6 @@ cdef int check_array_constraint(np.ndarray val, object name, constraint_type con cdef int check_constraint(double val, object name, constraint_type cons) except -1: - cdef bint is_nan if cons == CONS_NON_NEGATIVE: if not isnan(val) and signbit(val): raise ValueError(f"{name} < 0") @@ -760,7 +758,6 @@ cdef object discrete_broadcast_di(void *func, void *state, object size, object l np.ndarray a_arr, object a_name, constraint_type a_constraint, np.ndarray b_arr, object b_name, constraint_type b_constraint): cdef np.ndarray randoms - cdef int64_t *randoms_data cdef np.broadcast it cdef random_uint_di f = (func) cdef np.npy_intp i, n @@ -777,7 +774,6 @@ cdef object discrete_broadcast_di(void *func, void *state, object size, object l it = np.PyArray_MultiIterNew2(a_arr, b_arr) randoms = np.empty(it.shape, np.int64) - randoms_data = np.PyArray_DATA(randoms) n = np.PyArray_SIZE(randoms) it = np.PyArray_MultiIterNew3(randoms, a_arr, 
b_arr) @@ -1047,7 +1043,7 @@ cdef object cont_f(void *func, bitgen_t *state, object size, object lock, object a, object a_name, constraint_type a_constraint, object out): - cdef np.ndarray a_arr, b_arr, c_arr + cdef np.ndarray a_arr cdef float _a cdef bint is_scalar = True cdef int requirements = np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_FORCECAST diff --git a/numpy/random/_examples/cython/extending_distributions.pyx b/numpy/random/_examples/cython/extending_distributions.pyx index e1d1ea6c820b..8de722686304 100644 --- a/numpy/random/_examples/cython/extending_distributions.pyx +++ b/numpy/random/_examples/cython/extending_distributions.pyx @@ -92,7 +92,6 @@ def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The default dtype value is 'd' """ - cdef Py_ssize_t i cdef bitgen_t *rng cdef const char *capsule_name = "BitGenerator" cdef np.ndarray randoms diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index dc78a76eda70..f3fb9bb7baf5 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,856 +1,756 @@ -from collections.abc import Callable -from typing import Any, Literal, TypeAlias, TypeVar, overload +from collections.abc import Callable, MutableSequence +from typing import Any, Literal, Self, overload import numpy as np -from numpy import dtype, float32, float64, int64 from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _BoolCodes, - _DoubleCodes, _DTypeLike, - _DTypeLikeBool, _Float32Codes, _Float64Codes, _FloatLike_co, - _Int8Codes, - _Int16Codes, - _Int32Codes, _Int64Codes, - _IntPCodes, + _NestedSequence, _ShapeLike, - _SingleCodes, - _SupportsDType, - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UIntPCodes, ) -from numpy.random import BitGenerator, RandomState, SeedSequence -_IntegerT = TypeVar("_IntegerT", bound=np.integer) +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import RandomState -_DTypeLikeFloat32: TypeAlias = ( - dtype[float32] - | _SupportsDType[dtype[float32]] - | type[float32] - | _Float32Codes - | _SingleCodes -) +type _ArrayF32 = NDArray[np.float32] +type _ArrayF64 = NDArray[np.float64] -_DTypeLikeFloat64: TypeAlias = ( - dtype[float64] - | _SupportsDType[dtype[float64]] - | type[float] - | type[float64] - | _Float64Codes - | _DoubleCodes -) +type _DTypeLikeI64 = _DTypeLike[np.int64] | _Int64Codes +type _DTypeLikeF32 = _DTypeLike[np.float32] | _Float32Codes +type _DTypeLikeF64 = type[float] | _DTypeLike[np.float64] | _Float64Codes +# we use `str` to avoid type-checker performance issues because of the many `Literal` variants +type _DTypeLikeFloat = type[float] | _DTypeLike[np.float32 | np.float64] | str + +# Similar to `_ArrayLike{}_co`, but rejects scalars +type _NDArrayLikeInt = NDArray[np.generic[int]] | _NestedSequence[int] +type _NDArrayLikeFloat = NDArray[np.generic[float]] | _NestedSequence[float] + +type _MethodExp = Literal["zig", "inv"] + +### class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... - def __repr__(self) -> str: ... - def __str__(self) -> str: ... - def __getstate__(self) -> None: ... def __setstate__(self, state: dict[str, Any] | None) -> None: ... - def __reduce__(self) -> tuple[ - Callable[[BitGenerator], Generator], - tuple[BitGenerator], - None]: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], Generator], tuple[BitGenerator], None]: ... 
+
+    #
     @property
     def bit_generator(self) -> BitGenerator: ...
-    def spawn(self, n_children: int) -> list[Generator]: ...
+    def spawn(self, n_children: int) -> list[Self]: ...
     def bytes(self, length: int) -> bytes: ...
+
+    # continuous distributions
+
+    #
     @overload
-    def standard_normal(  # type: ignore[misc]
-        self,
-        size: None = ...,
-        dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
-        out: None = ...,
-    ) -> float: ...
-    @overload
-    def standard_normal(  # type: ignore[misc]
-        self,
-        size: _ShapeLike = ...,
-    ) -> NDArray[float64]: ...
-    @overload
-    def standard_normal(  # type: ignore[misc]
-        self,
-        *,
-        out: NDArray[float64] = ...,
-    ) -> NDArray[float64]: ...
-    @overload
-    def standard_normal(  # type: ignore[misc]
-        self,
-        size: _ShapeLike = ...,
-        dtype: _DTypeLikeFloat32 = ...,
-        out: NDArray[float32] | None = ...,
-    ) -> NDArray[float32]: ...
-    @overload
-    def standard_normal(  # type: ignore[misc]
-        self,
-        size: _ShapeLike = ...,
-        dtype: _DTypeLikeFloat64 = ...,
-        out: NDArray[float64] | None = ...,
-    ) -> NDArray[float64]: ...
-    @overload
-    def permutation(self, x: int, axis: int = ...) -> NDArray[int64]: ...
-    @overload
-    def permutation(self, x: ArrayLike, axis: int = ...) -> NDArray[Any]: ...
-    @overload
-    def standard_exponential(  # type: ignore[misc]
-        self,
-        size: None = ...,
-        dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
-        method: Literal["zig", "inv"] = ...,
-        out: None = ...,
-    ) -> float: ...
-    @overload
-    def standard_exponential(
-        self,
-        size: _ShapeLike = ...,
-    ) -> NDArray[float64]: ...
-    @overload
-    def standard_exponential(
-        self,
-        *,
-        out: NDArray[float64] = ...,
-    ) -> NDArray[float64]: ...
+    def standard_cauchy(self, size: None = None) -> float: ...
     @overload
+    def standard_cauchy(self, size: _ShapeLike) -> _ArrayF64: ...
+
+    #
+    @overload  # size=None (default); NOTE: dtype is ignored
+    def random(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ...
+    @overload  # size=, dtype=f64 (default)
+    def random(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ...
+    @overload  # size=, dtype=f32
+    def random(self, size: _ShapeLike, dtype: _DTypeLikeF32, out: None = None) -> _ArrayF32: ...
+    @overload  # out: f64 array (keyword)
+    def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT) -> ArrayT: ...
+    @overload  # dtype: f32 (keyword), out: f32 array
+    def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ...
+    @overload  # out: f64 array (positional)
+    def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None, dtype: _DTypeLikeF64, out: ArrayT) -> ArrayT: ...
+    @overload  # dtype: f32 (positional), out: f32 array
+    def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ...
+
+    #
+    @overload  # size=None (default); NOTE: dtype is ignored
+    def standard_normal(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ...
+    @overload  # size=, dtype: f64 (default)
+    def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ...
+    @overload  # size=, dtype: f32
+    def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None) -> _ArrayF32: ...
+    @overload  # dtype: f64 (default), out: f64 array (keyword)
+    def standard_normal[ArrayT: _ArrayF64](
+        self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT
+    ) -> ArrayT: ...
+    @overload  # dtype: f32 (keyword), out: f32 array
+    def standard_normal[ArrayT: _ArrayF32](
+        self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT
+    ) -> ArrayT: ...
+    @overload  # dtype: f32 (positional), out: f32 array
+    def standard_normal[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ...
+
+    #
+    @overload  # size=None (default); NOTE: dtype is ignored
     def standard_exponential(
-        self,
-        size: _ShapeLike = ...,
-        *,
-        method: Literal["zig", "inv"] = ...,
-        out: NDArray[float64] | None = ...,
-    ) -> NDArray[float64]: ...
-    @overload
+        self, size: None = None, dtype: _DTypeLikeFloat = ..., method: _MethodExp = "zig", out: None = None
+    ) -> float: ...
+    @overload  # size=, dtype: f64 (default)
     def standard_exponential(
-        self,
-        size: _ShapeLike = ...,
-        dtype: _DTypeLikeFloat32 = ...,
-        method: Literal["zig", "inv"] = ...,
-        out: NDArray[float32] | None = ...,
-    ) -> NDArray[float32]: ...
-    @overload
+        self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", out: None = None
+    ) -> _ArrayF64: ...
+    @overload  # size=, dtype: f32
     def standard_exponential(
-        self,
-        size: _ShapeLike = ...,
-        dtype: _DTypeLikeFloat64 = ...,
-        method: Literal["zig", "inv"] = ...,
-        out: NDArray[float64] | None = ...,
-    ) -> NDArray[float64]: ...
-    @overload
-    def random(  # type: ignore[misc]
-        self,
-        size: None = ...,
-        dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
-        out: None = ...,
+        self, size: _ShapeLike, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: None = None
+    ) -> _ArrayF32: ...
+    @overload  # dtype: f64 (default), out: f64 array (keyword)
+    def standard_exponential[ArrayT: _ArrayF64](
+        self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", *, out: ArrayT
+    ) -> ArrayT: ...
+    @overload  # dtype: f32 (keyword), out: f32 array
+    def standard_exponential[ArrayT: _ArrayF32](
+        self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: ArrayT
+    ) -> ArrayT: ...
+    @overload  # dtype: f32 (positional), out: f32 array (keyword)
+    def standard_exponential[ArrayT: _ArrayF32](
+        self, size: _ShapeLike | None, dtype: _DTypeLikeF32, method: _MethodExp = "zig", *, out: ArrayT
+    ) -> ArrayT: ...
+
+    #
+    @overload  # 0d, size=None (default); NOTE: dtype is ignored
+    def standard_gamma(
+        self, shape: _FloatLike_co, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None
     ) -> float: ...
-    @overload
-    def random(
-        self,
-        *,
-        out: NDArray[float64] = ...,
-    ) -> NDArray[float64]: ...
-    @overload
-    def random(
-        self,
-        size: _ShapeLike = ...,
-        *,
-        out: NDArray[float64] | None = ...,
-    ) -> NDArray[float64]: ...
-    @overload
-    def random(
-        self,
-        size: _ShapeLike = ...,
-        dtype: _DTypeLikeFloat32 = ...,
-        out: NDArray[float32] | None = ...,
-    ) -> NDArray[float32]: ...
-    @overload
-    def random(
-        self,
-        size: _ShapeLike = ...,
-        dtype: _DTypeLikeFloat64 = ...,
-        out: NDArray[float64] | None = ...,
-    ) -> NDArray[float64]: ...
-    @overload
-    def beta(
-        self,
-        a: _FloatLike_co,
-        b: _FloatLike_co,
-        size: None = ...,
-    ) -> float: ...  # type: ignore[misc]
-    @overload
-    def beta(
-        self,
-        a: _ArrayLikeFloat_co,
-        b: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = ...
-    ) -> NDArray[float64]: ...
- @overload - def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] - @overload - def exponential(self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...) -> NDArray[float64]: ... + @overload # >0d, dtype: f64 (default) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32: ... + @overload # >=0d, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64 | Any: ... + @overload # >=0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32 | Any: ... + @overload # >=0d, size=, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >=0d, size=, dtype: f32 + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None + ) -> _ArrayF32: ... + @overload # >=0d, dtype: f64 (default), out: f64 array (keyword) + def standard_gamma[ArrayT: _ArrayF64]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT + ) -> ArrayT: ... + @overload # >=0d, dtype: f32 (keyword), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... + @overload # >=0d, dtype: f32 (positional), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... # - @overload + @overload # 0d + def power(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def power(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def power(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def power(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def pareto(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def pareto(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def pareto(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def pareto(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def weibull(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def weibull(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def weibull(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def weibull(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def standard_t(self, /, df: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def standard_t(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def standard_t(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... 
+ @overload # >=0d + def standard_t(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def chisquare(self, /, df: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def chisquare(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def chisquare(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def chisquare(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def exponential(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def exponential(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def exponential(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def exponential(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def exponential(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def rayleigh(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def rayleigh(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def rayleigh(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def noncentral_chisquare(self, /, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def noncentral_chisquare(self, /, df: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def f(self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def vonmises(self, /, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... 
+ @overload # >0d, >=0d + def vonmises(self, /, mu: _NDArrayLikeFloat, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def wald(self, /, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def wald(self, /, mean: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def beta(self, /, a: _FloatLike_co, b: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def beta(self, /, a: _ArrayLikeFloat_co, b: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def beta(self, /, a: _NDArrayLikeFloat, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d (default) + def gamma(self, /, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (keyword) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gamma(self, /, shape: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def uniform(self, /, low: _FloatLike_co = 0.0, high: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # >=0d, >=0d, size= (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d, size= (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, *, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def uniform(self, /, low: _NDArrayLikeFloat, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... 
+ + # + @overload # 0d (default), 0d (default) + def normal(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def normal(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def gumbel(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gumbel(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def logistic(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def logistic(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def logistic( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def laplace(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... 
+ @overload # >=0d, >0d (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def laplace(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def laplace( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def lognormal(self, /, mean: _FloatLike_co = 0.0, sigma: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # size= (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, *, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def lognormal(self, /, mean: _NDArrayLikeFloat, sigma: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def lognormal( + self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d, 0d + def triangular(self, /, left: _FloatLike_co, mode: _FloatLike_co, right: _FloatLike_co, size: None = None) -> float: ... + @overload # >=0d, >=0d, >=0d, size= + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >0d, >=0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _NDArrayLikeFloat, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def triangular( + self, /, left: _NDArrayLikeFloat, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d, 0d + def noncentral_f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... + @overload # >=0d, >=0d, >=0d, size= + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... 
+ @overload # >=0d, >0d, >=0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def noncentral_f( + self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... + + ### + # discrete + + # + @overload # 0d bool | int + def integers[AnyIntT: (bool, int)]( + self, low: int, high: int | None = None, size: None = None, *, dtype: type[AnyIntT], endpoint: bool = False + ) -> AnyIntT: ... + @overload # 0d integer dtype + def integers[ScalarT: np.integer | np.bool]( + self, low: int, high: int | None = None, size: None = None, *, dtype: _DTypeLike[ScalarT], endpoint: bool = False + ) -> ScalarT: ... + @overload # 0d int64 (default) def integers( - self, - low: int, - high: int | None = None, - size: None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., - endpoint: bool = False, + self, low: int, high: int | None = None, size: None = None, dtype: _DTypeLikeI64 = ..., endpoint: bool = False ) -> np.int64: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: type[bool], - endpoint: bool = False, - ) -> bool: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: type[int], - endpoint: bool = False, - ) -> int: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _DTypeLike[np.bool] | _BoolCodes, - endpoint: bool = False, - ) -> np.bool: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _DTypeLike[_IntegerT], - endpoint: bool = False, - ) -> _IntegerT: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., - endpoint: bool = False, - ) -> NDArray[np.int64]: ... - @overload + @overload # 0d unknown def integers( + self, low: int, high: int | None = None, size: None = None, dtype: DTypeLike | None = ..., endpoint: bool = False + ) -> Any: ... + @overload # integer dtype, size= + def integers[ScalarT: np.integer | np.bool]( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, *, - dtype: _DTypeLikeBool, + size: _ShapeLike, + dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> NDArray[np.bool]: ... - @overload + ) -> NDArray[ScalarT]: ... + @overload # int64 (default), size= def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _DTypeLike[_IntegerT], - endpoint: bool = False, - ) -> NDArray[_IntegerT]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, *, - dtype: _Int8Codes, + size: _ShapeLike, + dtype: _DTypeLikeI64 = ..., endpoint: bool = False, - ) -> np.int8: ... - @overload + ) -> NDArray[np.int64]: ... + @overload # unknown, size= def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _Int8Codes, - endpoint: bool = False, - ) -> NDArray[np.int8]: ... 
- @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, *, - dtype: _UInt8Codes, + size: _ShapeLike, + dtype: DTypeLike | None = ..., endpoint: bool = False, - ) -> np.uint8: ... - @overload - def integers( + ) -> np.ndarray: ... + @overload # >=0d, integer dtype + def integers[ScalarT: np.integer | np.bool]( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, *, - dtype: _UInt8Codes, - endpoint: bool = False, - ) -> NDArray[np.uint8]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _Int16Codes, + dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> np.int16: ... - @overload + ) -> NDArray[ScalarT] | Any: ... + @overload # >=0d, int64 (default) def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - *, - dtype: _Int16Codes, - endpoint: bool = False, - ) -> NDArray[np.int16]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt16Codes, + dtype: _DTypeLikeI64 = ..., endpoint: bool = False, - ) -> np.uint16: ... - @overload + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, unknown def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - *, - dtype: _UInt16Codes, - endpoint: bool = False, - ) -> NDArray[np.uint16]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _Int32Codes, + dtype: DTypeLike | None = ..., endpoint: bool = False, - ) -> np.int32: ... - @overload - def integers( + ) -> np.ndarray | Any: ... + + # + @overload # 0d + def zipf(self, /, a: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def zipf(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def zipf(self, /, a: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def zipf(self, /, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def geometric(self, /, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def geometric(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def geometric(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def geometric(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def logseries(self, /, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def logseries(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def logseries(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def logseries(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d (default) + def poisson(self, /, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... + @overload # size= (keyword) + def poisson(self, /, lam: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # size= (positional) + def poisson(self, /, lam: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def poisson(self, /, lam: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... 
+ @overload # >=0d + def poisson(self, /, lam: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def binomial(self, /, n: int, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >=0d, >0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def binomial(self, /, n: _NDArrayLikeInt, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def negative_binomial(self, /, n: _FloatLike_co, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >=0d, >0d + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def negative_binomial(self, /, n: _NDArrayLikeFloat, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def negative_binomial( + self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d, 0d + def hypergeometric(self, /, ngood: int, nbad: int, nsample: int, size: None = None) -> int: ... + @overload # size= + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: _ShapeLike + ) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d, >0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _NDArrayLikeInt, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _NDArrayLikeInt, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _NDArrayLikeInt, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + ### + # multivariate + + # + def dirichlet(self, /, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> _ArrayF64: ... + + # + def multivariate_normal( self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, + /, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, *, - dtype: _Int32Codes, - endpoint: bool = False, - ) -> NDArray[np.int32]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt32Codes, - endpoint: bool = False, - ) -> np.uint32: ... - @overload - def integers( + method: Literal["svd", "eigh", "cholesky"] = "svd", + ) -> _ArrayF64: ... + + # + def multinomial( + self, /, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[np.int64]: ... 
+ + # + def multivariate_hypergeometric( self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, + /, + colors: _ArrayLikeInt_co, + nsample: int, size: _ShapeLike | None = None, - *, - dtype: _UInt32Codes, - endpoint: bool = False, - ) -> NDArray[np.uint32]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt64Codes, - endpoint: bool = False, - ) -> np.uint64: ... + method: Literal["marginals", "count"] = "marginals", + ) -> NDArray[np.int64]: ... + + ### + # resampling + + # axis must be 0 for MutableSequence @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt64Codes, - endpoint: bool = False, - ) -> NDArray[np.uint64]: ... + def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _IntPCodes, - endpoint: bool = False, - ) -> np.intp: ... + def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... + + # @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _IntPCodes, - endpoint: bool = False, - ) -> NDArray[np.intp]: ... + def permutation(self, /, x: int, axis: int = 0) -> NDArray[np.int64]: ... @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UIntPCodes, - endpoint: bool = False, - ) -> np.uintp: ... + def permutation(self, /, x: ArrayLike, axis: int = 0) -> np.ndarray: ... + + # @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UIntPCodes, - endpoint: bool = False, - ) -> NDArray[np.uintp]: ... + def permuted[ArrayT: np.ndarray](self, /, x: ArrayT, *, axis: int | None = None, out: None = None) -> ArrayT: ... @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - dtype: DTypeLike = ..., - endpoint: bool = False, - ) -> Any: ... + def permuted(self, /, x: ArrayLike, *, axis: int | None = None, out: None = None) -> np.ndarray: ... @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - dtype: DTypeLike = ..., - endpoint: bool = False, - ) -> NDArray[Any]: ... + def permuted[ArrayT: np.ndarray](self, /, x: ArrayLike, *, axis: int | None = None, out: ArrayT) -> ArrayT: ... - # TODO: Use a TypeVar _T here to get away from Any output? - # Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] - @overload + # + @overload # >=0d int, size=None (default) def choice( self, - a: int, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + /, + a: int | _NestedSequence[int], + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> int: ... - @overload - def choice( + @overload # >=0d known, size=None (default) + def choice[ScalarT: np.generic]( self, - a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., - ) -> NDArray[int64]: ... 
- @overload + /, + a: _ArrayLike[ScalarT], + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> ScalarT: ... + @overload # >=0d unknown, size=None (default) def choice( self, + /, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> Any: ... - @overload + @overload # >=0d int, size= def choice( self, - a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., - ) -> NDArray[Any]: ... - @overload - def uniform( - self, - low: _FloatLike_co = ..., - high: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def uniform( - self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def normal( - self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def normal( - self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def standard_gamma( # type: ignore[misc] - self, - shape: _FloatLike_co, - size: None = ..., - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., - ) -> float: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - *, - out: NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = ..., - ) -> NDArray[float32]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = ..., - ) -> NDArray[float64]: ... - @overload - def gamma( - self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ... - ) -> float: ... # type: ignore[misc] - @overload - def gamma( - self, - shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def f( - self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ... - ) -> float: ... # type: ignore[misc] - @overload - def f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... - @overload - def noncentral_f( - self, - dfnum: _FloatLike_co, - dfden: _FloatLike_co, - nonc: _FloatLike_co, size: None = ... - ) -> float: ... # type: ignore[misc] - @overload - def noncentral_f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... - @overload - def noncentral_chisquare( - self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ... - ) -> float: ... 
# type: ignore[misc] - @overload - def noncentral_chisquare( - self, - df: _ArrayLikeFloat_co, - nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... - @overload - def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: None = ... - ) -> NDArray[float64]: ... - @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def vonmises( - self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ... - ) -> float: ... # type: ignore[misc] - @overload - def vonmises( - self, - mu: _ArrayLikeFloat_co, - kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... - @overload - def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... - @overload - def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... - @overload - def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... - @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... - @overload - def laplace( - self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def laplace( - self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def gumbel( - self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def gumbel( - self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def logistic( - self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def logistic( - self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def lognormal( - self, - mean: _FloatLike_co = ..., - sigma: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def lognormal( - self, - mean: _ArrayLikeFloat_co = ..., - sigma: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] - @overload - def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... - @overload - def wald( - self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ... - ) -> float: ... # type: ignore[misc] - @overload - def wald( - self, - mean: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... 
- @overload - def triangular( - self, - left: _FloatLike_co, - mode: _FloatLike_co, - right: _FloatLike_co, - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def triangular( - self, - left: _ArrayLikeFloat_co, - mode: _ArrayLikeFloat_co, - right: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - ) -> NDArray[float64]: ... - @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... - ) -> NDArray[int64]: ... - @overload - def negative_binomial( - self, n: _FloatLike_co, p: _FloatLike_co, size: None = ... - ) -> int: ... # type: ignore[misc] - @overload - def negative_binomial( - self, - n: _ArrayLikeFloat_co, - p: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... - ) -> NDArray[int64]: ... - @overload - def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] - @overload - def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... - ) -> NDArray[int64]: ... - @overload - def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... - ) -> NDArray[int64]: ... - @overload - def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... - ) -> NDArray[int64]: ... - @overload - def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = ... - ) -> int: ... # type: ignore[misc] - @overload - def hypergeometric( - self, - ngood: _ArrayLikeInt_co, - nbad: _ArrayLikeInt_co, - nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = ..., - ) -> NDArray[int64]: ... - @overload - def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... - ) -> NDArray[int64]: ... - def multivariate_normal( - self, - mean: _ArrayLikeFloat_co, - cov: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - check_valid: Literal["warn", "raise", "ignore"] = ..., - tol: float = ..., - *, - method: Literal["svd", "eigh", "cholesky"] = ..., - ) -> NDArray[float64]: ... - def multinomial( - self, n: _ArrayLikeInt_co, - pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... - ) -> NDArray[int64]: ... - def multivariate_hypergeometric( + /, + a: int | _NestedSequence[int], + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> NDArray[np.int64]: ... + @overload # >=0d known, size= + def choice[ScalarT: np.generic]( + self, + /, + a: _ArrayLike[ScalarT], + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> NDArray[ScalarT]: ... + @overload # >=0d unknown, size= + def choice( self, - colors: _ArrayLikeInt_co, - nsample: int, - size: _ShapeLike | None = ..., - method: Literal["marginals", "count"] = ..., - ) -> NDArray[int64]: ... - def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ... - ) -> NDArray[float64]: ... - def permuted( - self, x: ArrayLike, *, axis: int | None = ..., out: NDArray[Any] | None = ... - ) -> NDArray[Any]: ... - def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... 
- -def default_rng( - seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = ... -) -> Generator: ... + /, + a: ArrayLike, + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> np.ndarray: ... + +def default_rng(seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None) -> Generator: ... diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index c067a0821563..35794bdfca6a 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -16,13 +16,11 @@ from numpy.lib.array_utils import normalize_axis_index from .c_distributions cimport * from libc cimport string from libc.math cimport sqrt -from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, - int32_t, int64_t, INT64_MAX, SIZE_MAX) +from libc.stdint cimport (uint64_t, int64_t, INT64_MAX, SIZE_MAX) from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64, _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16, _rand_uint8, _gen_mask) from ._pcg64 import PCG64 -from ._mt19937 import MT19937 from numpy.random cimport bitgen_t from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE, CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1, @@ -142,8 +140,8 @@ cdef bint _check_bit_generator(object bitgen): cdef class Generator: - """ - Generator(bit_generator) + # the first line is used to populate `__text_signature__` + """Generator(bit_generator)\n-- Container for the BitGenerators. @@ -352,7 +350,6 @@ cdef class Generator: [-1.23204345, -1.75224494]]) """ - cdef double temp _dtype = np.dtype(dtype) if _dtype == np.float64: return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out) @@ -399,8 +396,8 @@ cdef class Generator: Drawn samples from the parameterized beta distribution. Examples - -------- - The beta distribution has mean a/(a+b). If ``a == b`` and both + -------- + The beta distribution has mean a/(a+b). If ``a == b`` and both are > 1, the distribution is symmetric with mean 0.5. >>> rng = np.random.default_rng() @@ -408,11 +405,11 @@ cdef class Generator: >>> sample = rng.beta(a=a, b=b, size=size) >>> np.mean(sample) 0.5047328775385895 # may vary - + Otherwise the distribution is skewed left or right according to whether ``a`` or ``b`` is greater. The distribution is mirror symmetric. See for example: - + >>> a, b, size = 2, 7, 10000 >>> sample_left = rng.beta(a=a, b=b, size=size) >>> sample_right = rng.beta(a=b, b=a, size=size) @@ -425,12 +422,12 @@ cdef class Generator: -0.0003163943736596009 # may vary Display the histogram of the two samples: - + >>> import matplotlib.pyplot as plt - >>> plt.hist([sample_left, sample_right], + >>> plt.hist([sample_left, sample_right], ... 50, density=True, histtype='bar') >>> plt.show() - + References ---------- .. [1] Wikipedia, "Beta distribution", @@ -480,17 +477,17 @@ cdef class Generator: Examples -------- - Assume a company has 10000 customer support agents and the time - between customer calls is exponentially distributed and that the + Assume a company has 10000 customer support agents and the time + between customer calls is exponentially distributed and that the average time between customer calls is 4 minutes. >>> scale, size = 4, 10000 >>> rng = np.random.default_rng() >>> time_between_calls = rng.exponential(scale=scale, size=size) - What is the probability that a customer will call in the next - 4 to 5 minutes? 
- + What is the probability that a customer will call in the next + 4 to 5 minutes? + >>> x = ((time_between_calls < 5).sum())/size >>> y = ((time_between_calls < 4).sum())/size >>> x - y @@ -721,10 +718,10 @@ cdef class Generator: Notes ----- - This function generates random bytes from a discrete uniform - distribution. The generated bytes are independent from the CPU's + This function generates random bytes from a discrete uniform + distribution. The generated bytes are independent from the CPU's native endianness. - + Examples -------- >>> rng = np.random.default_rng() @@ -796,7 +793,7 @@ cdef class Generator: than the optimized sampler even if each element of ``p`` is 1 / len(a). ``p`` must sum to 1 when cast to ``float64``. To ensure this, you may wish - to normalize using ``p = p / np.sum(p, dtype=float)``. + to normalize using ``p = p / np.sum(p, dtype=np.float64)``. When passing ``a`` as an integer type and ``size`` is not specified, the return type is a native Python ``int``. @@ -845,7 +842,7 @@ cdef class Generator: """ - cdef int64_t val, t, loc, size_i, pop_size_i + cdef int64_t val, loc, size_i, pop_size_i cdef int64_t *idx_data cdef np.npy_intp j cdef uint64_t set_size, mask @@ -1027,9 +1024,9 @@ cdef class Generator: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than high. The high limit may be included in the returned array of - floats due to floating-point rounding in the equation - ``low + (high-low) * random_sample()``. high - low must be + less than high. The high limit may be included in the returned array of + floats due to floating-point rounding in the equation + ``low + (high-low) * random_sample()``. high - low must be non-negative. The default value is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then @@ -1080,7 +1077,6 @@ cdef class Generator: >>> plt.show() """ - cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange cdef double _low, _high, rng cdef object temp @@ -1370,7 +1366,6 @@ cdef class Generator: >>> plt.show() """ - cdef void *func _dtype = np.dtype(dtype) if _dtype == np.float64: return cont(&random_standard_gamma, &self._bitgen, size, self.lock, 1, @@ -1521,7 +1516,7 @@ cdef class Generator: Examples -------- - An example from Glantz[1], pp 47-40: + An example from Glantz [1]_, pp 47-40: Two groups, children of diabetics (25 people) and children from people without diabetes (25 controls). Fasting blood glucose was measured, @@ -1546,10 +1541,10 @@ cdef class Generator: So there is about a 1% chance that the F statistic will exceed 7.62, the measured value is 36, so the null hypothesis is rejected at the 1% level. 
- - The corresponding probability density function for ``n = 20`` + + The corresponding probability density function for ``n = 20`` and ``m = 20`` is: - + >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> dfnum, dfden, size = 20, 20, 10000 @@ -1559,7 +1554,7 @@ cdef class Generator: >>> plt.plot(x, stats.f.pdf(x, dfnum, dfden)) >>> plt.xlim([0, 5]) >>> plt.show() - + """ return cont(&random_f, &self._bitgen, size, self.lock, 2, dfnum, 'dfnum', CONS_POSITIVE, @@ -1706,7 +1701,7 @@ cdef class Generator: The distribution of a chi-square random variable with 20 degrees of freedom looks as follows: - + >>> import matplotlib.pyplot as plt >>> import scipy.stats as stats >>> s = rng.chisquare(20, 10000) @@ -1926,14 +1921,14 @@ cdef class Generator: Does their energy intake deviate systematically from the recommended value of 7725 kJ? Our null hypothesis will be the absence of deviation, and the alternate hypothesis will be the presence of an effect that could be - either positive or negative, hence making our test 2-tailed. + either positive or negative, hence making our test 2-tailed. Because we are estimating the mean and we have N=11 values in our sample, - we have N-1=10 degrees of freedom. We set our significance level to 95% and - compute the t statistic using the empirical mean and empirical standard - deviation of our intake. We use a ddof of 1 to base the computation of our + we have N-1=10 degrees of freedom. We set our significance level to 95% and + compute the t statistic using the empirical mean and empirical standard + deviation of our intake. We use a ddof of 1 to base the computation of our empirical standard deviation on an unbiased estimate of the variance (note: - the final estimate is not unbiased due to the concave nature of the square + the final estimate is not unbiased due to the concave nature of the square root). >>> np.mean(intake) @@ -1952,18 +1947,18 @@ cdef class Generator: >>> s = rng.standard_t(10, size=1000000) >>> h = plt.hist(s, bins=100, density=True) - Does our t statistic land in one of the two critical regions found at + Does our t statistic land in one of the two critical regions found at both tails of the distribution? >>> np.sum(np.abs(t) < np.abs(s)) / float(len(s)) 0.018318 #random < 0.05, statistic is in critical region - The probability value for this 2-tailed test is about 1.83%, which is - lower than the 5% pre-determined significance threshold. + The probability value for this 2-tailed test is about 1.83%, which is + lower than the 5% pre-determined significance threshold. Therefore, the probability of observing values as extreme as our intake - conditionally on the null hypothesis being true is too low, and we reject - the null hypothesis of no deviation. + conditionally on the null hypothesis being true is too low, and we reject + the null hypothesis of no deviation. """ return cont(&random_standard_t, &self._bitgen, size, self.lock, 1, @@ -2088,7 +2083,7 @@ cdef class Generator: ----- The probability density for the Pareto II distribution is - .. math:: p(x) = \\frac{a}{{x+1}^{a+1}} , x \ge 0 + .. math:: p(x) = \\frac{a}{(x+1)^{a+1}} , x \ge 0 where :math:`a > 0` is the shape. 
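
The exponent grouping fixed above matters: with the corrected density, a quick normalization check confirms it integrates to one for any shape parameter :math:`a > 0`:

.. math:: \int_0^\infty \frac{a}{(x+1)^{a+1}} \, dx
          = \Bigl[ -(x+1)^{-a} \Bigr]_0^\infty = 1
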
@@ -2932,7 +2927,6 @@ cdef class Generator: >>> plt.show() """ - cdef bint is_scalar = True cdef double fleft, fmode, fright cdef np.ndarray oleft, omode, oright @@ -3040,21 +3034,21 @@ cdef class Generator: Draw samples from the distribution: >>> rng = np.random.default_rng() - >>> n, p, size = 10, .5, 10000 + >>> n, p, size = 10, .5, 10000 >>> s = rng.binomial(n, p, 10000) Assume a company drills 9 wild-cat oil exploration wells, each with - an estimated probability of success of ``p=0.1``. All nine wells fail. + an estimated probability of success of ``p=0.1``. All nine wells fail. What is the probability of that happening? - Over ``size = 20,000`` trials the probability of this happening + Over ``size = 20,000`` trials the probability of this happening is on average: >>> n, p, size = 9, 0.1, 20000 >>> np.sum(rng.binomial(n=n, p=p, size=size) == 0)/size 0.39015 # may vary - The following can be used to visualize a sample with ``n=100``, + The following can be used to visualize a sample with ``n=100``, ``p=0.4`` and the corresponding probability density function: >>> import matplotlib.pyplot as plt @@ -3173,10 +3167,10 @@ cdef class Generator: appear before the third "1" is a negative binomial distribution. Because this method internally calls ``Generator.poisson`` with an - intermediate random value, a ValueError is raised when the choice of + intermediate random value, a ValueError is raised when the choice of :math:`n` and :math:`p` would result in the mean + 10 sigma of the sampled - intermediate distribution exceeding the max acceptable value of the - ``Generator.poisson`` method. This happens when :math:`p` is too low + intermediate distribution exceeding the max acceptable value of the + ``Generator.poisson`` method. This happens when :math:`p` is too low (a lot of failures happen for every success) and :math:`n` is too big ( a lot of successes are allowed). Therefore, the :math:`n` and :math:`p` values must satisfy the constraint: @@ -3308,7 +3302,7 @@ cdef class Generator: >>> s = rng.poisson(lam=lam, size=size) Verify the mean and variance, which should be approximately ``lam``: - + >>> s.mean(), s.var() (4.9917 5.1088311) # may vary @@ -3462,7 +3456,7 @@ cdef class Generator: Examples -------- - Draw 10,000 values from the geometric distribution, with the + Draw 10,000 values from the geometric distribution, with the probability of an individual success equal to ``p = 0.35``: >>> p, size = 0.35, 10000 @@ -3481,7 +3475,7 @@ cdef class Generator: >>> plt.plot(bins, (1-p)**(bins-1)*p) >>> plt.xlim([0, 25]) >>> plt.show() - + """ return disc(&random_geometric, &self._bitgen, size, self.lock, 1, 0, p, 'p', CONS_BOUNDED_GT_0_1, @@ -3595,7 +3589,6 @@ cdef class Generator: """ cdef double HYPERGEOM_MAX = 10**9 - cdef bint is_scalar = True cdef np.ndarray ongood, onbad, onsample cdef int64_t lngood, lnbad, lnsample @@ -3670,8 +3663,8 @@ cdef class Generator: The log series distribution is frequently used to represent species richness and occurrence, first proposed by Fisher, Corbet, and - Williams in 1943 [2]. It may also be used to model the numbers of - occupants seen in cars [3]. + Williams in 1943 [2]_. It may also be used to model the numbers of + occupants seen in cars [3]_. References ---------- @@ -3699,7 +3692,7 @@ cdef class Generator: >>> bins = np.arange(-.5, max(s) + .5 ) >>> count, bins, _ = plt.hist(s, bins=bins, label='Sample count') - # plot against distribution + Plot against the distribution: >>> def logseries(k, p): ... 
return -p**k/(k*np.log(1-p)) @@ -3773,7 +3766,7 @@ cdef class Generator: Covariance indicates the level to which two variables vary together. From the multivariate normal distribution, we draw N-dimensional - samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix + samples, :math:`X = [x_1, x_2, ..., x_N]`. The covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its "spread"). @@ -3791,7 +3784,8 @@ cdef class Generator: >>> mean = [0, 0] >>> cov = [[1, 0], [0, 100]] # diagonal covariance - Diagonal covariance means that points are oriented along x or y-axis: + Diagonal covariance means that the variables are independent, and the + probability density contours have their axes aligned with the coordinate axes: >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() @@ -4652,11 +4646,11 @@ cdef class Generator: -------- shuffle permutation - + Notes ----- - An important distinction between methods ``shuffle`` and ``permuted`` is - how they both treat the ``axis`` parameter which can be found at + An important distinction between methods ``shuffle`` and ``permuted`` is + how they both treat the ``axis`` parameter which can be found at :ref:`generator-handling-axis-parameter`. Examples @@ -4728,7 +4722,7 @@ cdef class Generator: if axis is None: if x.ndim > 1: if not (np.PyArray_FLAGS(out) & (np.NPY_ARRAY_C_CONTIGUOUS | - np.NPY_ARRAY_F_CONTIGUOUS)): + np.NPY_ARRAY_F_CONTIGUOUS)): flags = (np.NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_WRITEBACKIFCOPY) to_shuffle = PyArray_FromArray(out, @@ -4808,8 +4802,8 @@ cdef class Generator: Notes ----- - An important distinction between methods ``shuffle`` and ``permuted`` is - how they both treat the ``axis`` parameter which can be found at + An important distinction between methods ``shuffle`` and ``permuted`` is + how they both treat the ``axis`` parameter which can be found at :ref:`generator-handling-axis-parameter`. Examples @@ -5017,11 +5011,11 @@ def default_rng(seed=None): Examples -------- `default_rng` is the recommended constructor for the random number class - `Generator`. Here are several ways we can construct a random - number generator using `default_rng` and the `Generator` class. + `Generator`. Here are several ways we can construct a random + number generator using `default_rng` and the `Generator` class. 
Here we use `default_rng` to generate a random float: - + >>> import numpy as np >>> rng = np.random.default_rng(12345) >>> print(rng) @@ -5031,10 +5025,10 @@ def default_rng(seed=None): 0.22733602246716966 >>> type(rfloat) <class 'float'> - - Here we use `default_rng` to generate 3 random integers between 0 + + Here we use `default_rng` to generate 3 random integers between 0 (inclusive) and 10 (exclusive): - + >>> import numpy as np >>> rng = np.random.default_rng(12345) >>> rints = rng.integers(low=0, high=10, size=3) @@ -5042,9 +5036,9 @@ def default_rng(seed=None): array([6, 2, 7]) >>> type(rints[0]) <class 'numpy.int64'> - + Here we specify a seed so that we have reproducible results: - + >>> import numpy as np >>> rng = np.random.default_rng(seed=42) >>> print(rng) diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 70b2506da7af..03373a6dd6ea 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -5,6 +5,8 @@ from numpy._typing import _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy.typing import NDArray +__all__ = ["MT19937"] + @type_check_only class _MT19937Internal(TypedDict): key: NDArray[uint32] @@ -18,8 +20,8 @@ class _MT19937State(TypedDict): class MT19937(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... - def jumped(self, jumps: int = ...) -> MT19937: ... - @property + def jumped(self, jumps: int = 1) -> MT19937: ... + @property # type: ignore[override] def state(self) -> _MT19937State: ... @state.setter def state(self, value: _MT19937State) -> None: ... diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index ed69c2aa6c58..c74498356dda 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -43,8 +43,8 @@ cdef uint64_t mt19937_raw(void *st) noexcept nogil: return <uint64_t>mt19937_next32(<mt19937_state *> st) cdef class MT19937(BitGenerator): - """ - MT19937(seed=None) + # the first line is used to populate `__text_signature__` + """MT19937(seed=None)\n-- Container for the Mersenne Twister pseudo-random number generator. diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 5dc7bb66321b..a9e81f7f181b 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -3,6 +3,8 @@ from typing import TypedDict, type_check_only from numpy._typing import _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence +__all__ = ["PCG64"] + @type_check_only class _PCG64Internal(TypedDict): state: int @@ -17,8 +19,8 @@ class _PCG64State(TypedDict): class PCG64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64: ... - @property + def jumped(self, jumps: int = 1) -> PCG64: ... + @property # type: ignore[override] def state( self, ) -> _PCG64State: ... @@ -31,14 +33,9 @@ class PCG64(BitGenerator): class PCG64DXSM(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64DXSM: ... - @property - def state( - self, - ) -> _PCG64State: ... + def jumped(self, jumps: int = 1) -> PCG64DXSM: ... + @property # type: ignore[override] + def state(self) -> _PCG64State: ... @state.setter - def state( - self, - value: _PCG64State, - ) -> None: ... + def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64DXSM: ...
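
With the ``jumped`` default now spelled out as ``jumps=1`` in these stubs, the parallel-streams idiom the method exists for reads directly from the signature; a minimal sketch (the seed is arbitrary):

    >>> import numpy as np
    >>> from numpy.random import PCG64, Generator
    >>> bit_gens = [PCG64(1234)]
    >>> for _ in range(3):
    ...     bit_gens.append(bit_gens[-1].jumped())  # equivalent to .jumped(jumps=1)
    >>> streams = [Generator(bg) for bg in bit_gens]  # four independent streams
    >>> len(streams)
    4
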
diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index e6e9b8e0ac3c..30a00a11aa1d 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -51,8 +51,8 @@ cdef double pcg64_cm_double(void* st) noexcept nogil: return uint64_to_double(pcg64_cm_next64(st)) cdef class PCG64(BitGenerator): - """ - PCG64(seed=None) + # the first line is used to populate `__text_signature__` + """PCG64(seed=None)\n-- BitGenerator for the PCG-64 pseudo-random number generator. @@ -264,7 +264,7 @@ cdef class PCG64(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. For example, two 16-bit integer values can be simulated @@ -284,8 +284,8 @@ cdef class PCG64(BitGenerator): cdef class PCG64DXSM(BitGenerator): - """ - PCG64DXSM(seed=None) + # the first line is used to populate `__text_signature__` + """PCG64DXSM(seed=None)\n-- BitGenerator for the PCG-64 DXSM pseudo-random number generator. @@ -498,7 +498,7 @@ cdef class PCG64DXSM(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. For example, two 16-bit integer values can be simulated diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index d8895bba67cf..3089f11ea629 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -5,6 +5,8 @@ from numpy._typing import _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy.typing import NDArray +__all__ = ["Philox"] + @type_check_only class _PhiloxInternal(TypedDict): counter: NDArray[uint64] @@ -26,14 +28,9 @@ class Philox(BitGenerator): counter: _ArrayLikeInt_co | None = ..., key: _ArrayLikeInt_co | None = ..., ) -> None: ... - @property - def state( - self, - ) -> _PhiloxState: ... + @property # type: ignore[override] + def state(self) -> _PhiloxState: ... @state.setter - def state( - self, - value: _PhiloxState, - ) -> None: ... - def jumped(self, jumps: int = ...) -> Philox: ... + def state(self, value: _PhiloxState) -> None: ... + def jumped(self, jumps: int = 1) -> Philox: ... def advance(self, delta: int) -> Philox: ... diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index 5faa281818fd..da47ad21e2de 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -1,7 +1,5 @@ #cython: binding=True -from cpython.pycapsule cimport PyCapsule_New - import numpy as np cimport numpy as np @@ -54,8 +52,8 @@ cdef double philox_double(void*st) noexcept nogil: return uint64_to_double(philox_next64( st)) cdef class Philox(BitGenerator): - """ - Philox(seed=None, counter=None, key=None) + # the first line is used to populate `__text_signature__` + """Philox(seed=None, counter=None, key=None)\n-- Container for the Philox (4x64) pseudo-random number generator. 
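
The ``\n--`` terminator on the first docstring line is CPython's embedded-signature convention: the part before ``--`` is exposed as ``__text_signature__`` and stripped from ``__doc__``, which is what makes ``inspect.signature`` usable on these extension types. A sketch of the intended effect, assuming the convention is honored for these ``cdef`` classes as the in-line comments state:

    >>> import inspect
    >>> import numpy as np
    >>> np.random.Philox.__text_signature__
    '(seed=None, counter=None, key=None)'
    >>> inspect.signature(np.random.Philox)
    <Signature (seed=None, counter=None, key=None)>
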
@@ -196,7 +194,7 @@ cdef class Philox(BitGenerator): cdef _reset_state_variables(self): cdef philox_state *rng_state = &self.rng_state - + rng_state[0].has_uint32 = 0 rng_state[0].uinteger = 0 rng_state[0].buffer_pos = PHILOX_BUFFER_SIZE @@ -317,7 +315,7 @@ cdef class Philox(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. For example, two 16-bit integer values can be simulated diff --git a/numpy/random/_pickle.pyi b/numpy/random/_pickle.pyi index b8b1b7bcf63b..b0aa143801ba 100644 --- a/numpy/random/_pickle.pyi +++ b/numpy/random/_pickle.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Final, Literal, TypedDict, TypeVar, overload, type_check_only +from typing import Final, Literal, TypedDict, overload, type_check_only from numpy.random._generator import Generator from numpy.random._mt19937 import MT19937 @@ -9,8 +9,6 @@ from numpy.random._sfc64 import SFC64 from numpy.random.bit_generator import BitGenerator from numpy.random.mtrand import RandomState -_T = TypeVar("_T", bound=BitGenerator) - @type_check_only class _BitGenerators(TypedDict): MT19937: type[MT19937] @@ -19,6 +17,8 @@ class _BitGenerators(TypedDict): Philox: type[Philox] SFC64: type[SFC64] +### + BitGenerators: Final[_BitGenerators] = ... @overload @@ -32,7 +32,7 @@ def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... @overload def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... @overload -def __bit_generator_ctor(bit_generator: type[_T]) -> _T: ... +def __bit_generator_ctor[BitGeneratorT: BitGenerator](bit_generator: type[BitGeneratorT]) -> BitGeneratorT: ... def __generator_ctor( bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index a6f0d8445f25..f5f3fed9c251 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -4,6 +4,8 @@ from numpy import uint64 from numpy._typing import NDArray, _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence +__all__ = ["SFC64"] + @type_check_only class _SFC64Internal(TypedDict): state: NDArray[uint64] @@ -17,12 +19,7 @@ class _SFC64State(TypedDict): class SFC64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... - @property - def state( - self, - ) -> _SFC64State: ... + @property # type: ignore[override] + def state(self) -> _SFC64State: ... @state.setter - def state( - self, - value: _SFC64State, - ) -> None: ... + def state(self, value: _SFC64State) -> None: ... diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx index 86136f0b42fb..81a5fc3d21e5 100644 --- a/numpy/random/_sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -34,8 +34,8 @@ cdef double sfc64_double(void* st) noexcept nogil: cdef class SFC64(BitGenerator): - """ - SFC64(seed=None) + # the first line is used to populate `__text_signature__` + """SFC64(seed=None)\n-- BitGenerator for Chris Doty-Humphrey's Small Fast Chaotic PRNG. 
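
Each bit generator's ``state`` property is now annotated with its own ``TypedDict`` (hence the ``# type: ignore[override]`` where the narrowed type conflicts with the base class). Round-tripping that dict is the supported way to checkpoint and restore a stream; a small sketch:

    >>> import numpy as np
    >>> bg = np.random.SFC64(42)
    >>> st = bg.state                # shaped like the _SFC64State TypedDict
    >>> st['bit_generator']
    'SFC64'
    >>> bg2 = np.random.SFC64()
    >>> bg2.state = st               # restore: bg2 now tracks bg exactly
    >>> bg2.random_raw() == bg.random_raw()
    True
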
diff --git a/numpy/random/bit_generator.pxd b/numpy/random/bit_generator.pxd index dfa7d0a71c08..dbaab4721fec 100644 --- a/numpy/random/bit_generator.pxd +++ b/numpy/random/bit_generator.pxd @@ -31,5 +31,5 @@ cdef class SeedSequence(): np.ndarray[np.npy_uint32, ndim=1] entropy_array) cdef get_assembled_entropy(self) -cdef class SeedlessSequence(): +cdef class SeedlessSeedSequence: pass diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 6ce4f4b9d6a1..3c2069aba408 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -1,4 +1,5 @@ import abc +from _typeshed import Incomplete from collections.abc import Callable, Mapping, Sequence from threading import Lock from typing import ( @@ -7,13 +8,10 @@ from typing import ( Literal, NamedTuple, Self, - TypeAlias, TypedDict, overload, type_check_only, ) - -from _typeshed import Incomplete from typing_extensions import CapsuleType import numpy as np @@ -30,7 +28,7 @@ __all__ = ["BitGenerator", "SeedSequence"] ### -_DTypeLikeUint_: TypeAlias = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes +type _DTypeLikeUint_ = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes @type_check_only class _SeedSeqState(TypedDict): diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index fbedb0fd5786..01b35a7a621a 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -34,12 +34,11 @@ SOFTWARE. """ import abc -import sys from itertools import cycle import re from secrets import randbits -from threading import Lock +from threading import RLock from cpython.pycapsule cimport PyCapsule_New @@ -227,8 +226,10 @@ class ISpawnableSeedSequence(ISeedSequence): """ -cdef class SeedlessSeedSequence(): - """ +cdef class SeedlessSeedSequence: + # the first line is used to populate `__text_signature__` + """SeedlessSeedSequence()\n-- + A seed sequence for BitGenerators with no need for seed state. See Also @@ -248,9 +249,9 @@ cdef class SeedlessSeedSequence(): ISpawnableSeedSequence.register(SeedlessSeedSequence) -cdef class SeedSequence(): - """ - SeedSequence(entropy=None, *, spawn_key=(), pool_size=4) +cdef class SeedSequence: + # the first line is used to populate `__text_signature__` + """SeedSequence(entropy=None, *, spawn_key=(), pool_size=4, n_children_spawned=0)\n-- SeedSequence mixes sources of entropy in a reproducible way to set the initial state for independent and very probably non-overlapping @@ -490,9 +491,9 @@ cdef class SeedSequence(): ISpawnableSeedSequence.register(SeedSequence) -cdef class BitGenerator(): - """ - BitGenerator(seed=None) +cdef class BitGenerator: + # the first line is used to populate `__text_signature__` + """BitGenerator(seed=None)\n-- Base Class for generic BitGenerators, which provide a stream of random bits based on different algorithms. Must be overridden. 
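
The expanded ``SeedSequence`` text signature documents ``n_children_spawned``, the counter that makes ``spawn`` reproducible when a sequence is rebuilt from its entropy alone. A brief sketch of that round trip:

    >>> import numpy as np
    >>> ss = np.random.SeedSequence(entropy=1234)
    >>> _ = ss.spawn(2)              # advances ss.n_children_spawned to 2
    >>> ss2 = np.random.SeedSequence(entropy=1234, n_children_spawned=2)
    >>> ss.spawn(1)[0].spawn_key
    (2,)
    >>> ss2.spawn(1)[0].spawn_key
    (2,)
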
@@ -521,7 +522,7 @@ cdef class BitGenerator(): """ def __init__(self, seed=None): - self.lock = Lock() + self.lock = RLock() self._bitgen.state = 0 if type(self) is BitGenerator: raise NotImplementedError('BitGenerator is a base class and cannot be instantized') diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 54bb1462fb5f..7a654971f19b 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -29,7 +29,7 @@ from numpy._typing import ( _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, + _IntPCodes, _LongCodes, _ShapeLike, _SupportsDType, @@ -37,11 +37,67 @@ from numpy._typing import ( _UInt16Codes, _UInt32Codes, _UInt64Codes, - _UIntCodes, + _UIntPCodes, _ULongCodes, ) from numpy.random.bit_generator import BitGenerator +__all__ = [ + "RandomState", + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_bit_generator", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_bit_generator", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", +] + class RandomState: _bit_generator: BitGenerator def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ... @@ -50,268 +106,268 @@ class RandomState: def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501 - def seed(self, seed: _ArrayLikeFloat_co | None = ...) -> None: ... + def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ... @overload - def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... + def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... @overload def get_state( - self, legacy: Literal[True] = ... + self, legacy: Literal[True] = True ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... def set_state( self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] ) -> None: ... @overload - def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + def random_sample(self, size: None = None) -> float: ... @overload def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def random(self, size: None = ...) -> float: ... # type: ignore[misc] + def random(self, size: None = None) -> float: ... @overload def random(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + def beta(self, a: float, b: float, size: None = None) -> float: ... @overload def beta( self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def exponential(self, scale: float = 1.0, size: None = None) -> float: ... 
@overload def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_exponential(self, size: None = None) -> float: ... @overload def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + def tomaxint(self, size: None = None) -> int: ... @overload # Generates long values, but stores it in a 64bit int: def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, ) -> int: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[bool] = ..., ) -> bool: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[np.bool] = ..., ) -> np.bool: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[int] = ..., ) -> int: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> uint8: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> uint16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> uint32: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 + high: int | None = None, + size: None = None, + dtype: dtype[uint] | type[uint] | _UIntPCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 ) -> uint: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> ulong: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> uint64: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> int8: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> int16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> int32: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., - dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 + high: int | None = None, + size: None = None, + dtype: dtype[int_] | type[int_] | _IntPCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 ) -> int_: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> long: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 ) -> int64: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: _DTypeLikeBool = ..., ) -> NDArray[np.bool]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> NDArray[int8]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> NDArray[int16]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> NDArray[int32]: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 ) -> NDArray[int64]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> NDArray[uint8]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> NDArray[uint16]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> NDArray[uint32]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> NDArray[uint64]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> NDArray[long]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> NDArray[ulong]: ... def bytes(self, length: int) -> builtins.bytes: ... @@ -319,44 +375,44 @@ class RandomState: def choice( self, a: int, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> int: ... @overload def choice( self, a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> NDArray[long]: ... @overload def choice( self, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> Any: ... 
@overload def choice( self, a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> NDArray[Any]: ... @overload def uniform( - self, low: float = ..., high: float = ..., size: None = ... - ) -> float: ... # type: ignore[misc] + self, low: float = 0.0, high: float = 1.0, size: None = None + ) -> float: ... @overload def uniform( self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def rand(self) -> float: ... @@ -368,275 +424,275 @@ class RandomState: def randn(self, *args: int) -> NDArray[float64]: ... @overload def random_integers( - self, low: int, high: int | None = ..., size: None = ... - ) -> int: ... # type: ignore[misc] + self, low: int, high: int | None = None, size: None = None + ) -> int: ... @overload def random_integers( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_normal(self, size: None = None) -> float: ... @overload - def standard_normal( # type: ignore[misc] - self, size: _ShapeLike = ... + def standard_normal( + self, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def normal( - self, loc: float = ..., scale: float = ..., size: None = ... - ) -> float: ... # type: ignore[misc] + self, loc: float = 0.0, scale: float = 1.0, size: None = None + ) -> float: ... @overload def normal( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def standard_gamma( # type: ignore[misc] + def standard_gamma( self, shape: float, - size: None = ..., + size: None = None, ) -> float: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... @overload def gamma( self, shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... @overload def f( self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_f( - self, dfnum: float, dfden: float, nonc: float, size: None = ... - ) -> float: ... # type: ignore[misc] + self, dfnum: float, dfden: float, nonc: float, size: None = None + ) -> float: ... 
     @overload
     def noncentral_f(
         self,
         dfnum: _ArrayLikeFloat_co,
         dfden: _ArrayLikeFloat_co,
         nonc: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = ...,
+        size: _ShapeLike | None = None,
     ) -> NDArray[float64]: ...
     @overload
-    def chisquare(self, df: float, size: None = ...) -> float: ...  # type: ignore[misc]
+    def chisquare(self, df: float, size: None = None) -> float: ...
     @overload
     def chisquare(
-        self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
+        self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     @overload
     def noncentral_chisquare(
-        self, df: float, nonc: float, size: None = ...
-    ) -> float: ...  # type: ignore[misc]
+        self, df: float, nonc: float, size: None = None
+    ) -> float: ...
     @overload
     def noncentral_chisquare(
         self,
         df: _ArrayLikeFloat_co,
         nonc: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = ...
+        size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     @overload
-    def standard_t(self, df: float, size: None = ...) -> float: ...  # type: ignore[misc]
+    def standard_t(self, df: float, size: None = None) -> float: ...
     @overload
     def standard_t(
-        self, df: _ArrayLikeFloat_co, size: None = ...
+        self, df: _ArrayLikeFloat_co, size: None = None
     ) -> NDArray[float64]: ...
     @overload
     def standard_t(
-        self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
+        self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     @overload
-    def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ...  # type: ignore[misc]
+    def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ...
     @overload
     def vonmises(
         self,
         mu: _ArrayLikeFloat_co,
         kappa: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = ...
+        size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     @overload
-    def pareto(self, a: float, size: None = ...) -> float: ...  # type: ignore[misc]
+    def pareto(self, a: float, size: None = None) -> float: ...
     @overload
     def pareto(
-        self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
+        self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     @overload
-    def weibull(self, a: float, size: None = ...) -> float: ...  # type: ignore[misc]
+    def weibull(self, a: float, size: None = None) -> float: ...
     @overload
     def weibull(
-        self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
+        self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     @overload
-    def power(self, a: float, size: None = ...) -> float: ...  # type: ignore[misc]
+    def power(self, a: float, size: None = None) -> float: ...
     @overload
     def power(
-        self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
+        self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     @overload
-    def standard_cauchy(self, size: None = ...) -> float: ...  # type: ignore[misc]
+    def standard_cauchy(self, size: None = None) -> float: ...
     @overload
-    def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ...
+    def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ...
     @overload
     def laplace(
-        self, loc: float = ..., scale: float = ..., size: None = ...
-    ) -> float: ...  # type: ignore[misc]
+        self, loc: float = 0.0, scale: float = 1.0, size: None = None
+    ) -> float: ...
     @overload
     def laplace(
         self,
-        loc: _ArrayLikeFloat_co = ...,
-        scale: _ArrayLikeFloat_co = ...,
-        size: _ShapeLike | None = ...,
+        loc: _ArrayLikeFloat_co = 0.0,
+        scale: _ArrayLikeFloat_co = 1.0,
+        size: _ShapeLike | None = None,
     ) -> NDArray[float64]: ...
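Spelling out the defaults (`loc=0.0`, `scale=1.0`, and so on) makes the stubs match the runtime signatures, so calling with or without the defaults is equivalent. A quick check of that equivalence (my sketch):

```python
import numpy as np

# Two fresh generators with the same seed draw identically, so a call with
# the explicit defaults matches a call that omits them.
a = np.random.RandomState(42).laplace()
b = np.random.RandomState(42).laplace(loc=0.0, scale=1.0)
assert a == b
```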
     @overload
     def gumbel(
-        self, loc: float = ..., scale: float = ..., size: None = ...
-    ) -> float: ...  # type: ignore[misc]
+        self, loc: float = 0.0, scale: float = 1.0, size: None = None
+    ) -> float: ...
     @overload
     def gumbel(
         self,
-        loc: _ArrayLikeFloat_co = ...,
-        scale: _ArrayLikeFloat_co = ...,
-        size: _ShapeLike | None = ...,
+        loc: _ArrayLikeFloat_co = 0.0,
+        scale: _ArrayLikeFloat_co = 1.0,
+        size: _ShapeLike | None = None,
     ) -> NDArray[float64]: ...
     @overload
     def logistic(
-        self, loc: float = ..., scale: float = ..., size: None = ...
-    ) -> float: ...  # type: ignore[misc]
+        self, loc: float = 0.0, scale: float = 1.0, size: None = None
+    ) -> float: ...
     @overload
     def logistic(
         self,
-        loc: _ArrayLikeFloat_co = ...,
-        scale: _ArrayLikeFloat_co = ...,
-        size: _ShapeLike | None = ...,
+        loc: _ArrayLikeFloat_co = 0.0,
+        scale: _ArrayLikeFloat_co = 1.0,
+        size: _ShapeLike | None = None,
     ) -> NDArray[float64]: ...
     @overload
     def lognormal(
-        self, mean: float = ..., sigma: float = ..., size: None = ...
-    ) -> float: ...  # type: ignore[misc]
+        self, mean: float = 0.0, sigma: float = 1.0, size: None = None
+    ) -> float: ...
     @overload
     def lognormal(
         self,
-        mean: _ArrayLikeFloat_co = ...,
-        sigma: _ArrayLikeFloat_co = ...,
-        size: _ShapeLike | None = ...,
+        mean: _ArrayLikeFloat_co = 0.0,
+        sigma: _ArrayLikeFloat_co = 1.0,
+        size: _ShapeLike | None = None,
     ) -> NDArray[float64]: ...
     @overload
-    def rayleigh(self, scale: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
+    def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ...
     @overload
     def rayleigh(
-        self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...
+        self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     @overload
-    def wald(self, mean: float, scale: float, size: None = ...) -> float: ...  # type: ignore[misc]
+    def wald(self, mean: float, scale: float, size: None = None) -> float: ...
     @overload
     def wald(
         self,
         mean: _ArrayLikeFloat_co,
         scale: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = ...
+        size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     @overload
     def triangular(
-        self, left: float, mode: float, right: float, size: None = ...
-    ) -> float: ...  # type: ignore[misc]
+        self, left: float, mode: float, right: float, size: None = None
+    ) -> float: ...
     @overload
     def triangular(
         self,
         left: _ArrayLikeFloat_co,
         mode: _ArrayLikeFloat_co,
         right: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = ...,
+        size: _ShapeLike | None = None,
     ) -> NDArray[float64]: ...
     @overload
     def binomial(
-        self, n: int, p: float, size: None = ...
-    ) -> int: ...  # type: ignore[misc]
+        self, n: int, p: float, size: None = None
+    ) -> int: ...
     @overload
     def binomial(
-        self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
+        self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[long]: ...
     @overload
     def negative_binomial(
-        self, n: float, p: float, size: None = ...
-    ) -> int: ...  # type: ignore[misc]
+        self, n: float, p: float, size: None = None
+    ) -> int: ...
     @overload
     def negative_binomial(
         self,
         n: _ArrayLikeFloat_co,
         p: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = ...
+        size: _ShapeLike | None = None
     ) -> NDArray[long]: ...
     @overload
     def poisson(
-        self, lam: float = ..., size: None = ...
-    ) -> int: ...  # type: ignore[misc]
+        self, lam: float = 1.0, size: None = None
+    ) -> int: ...
     @overload
     def poisson(
-        self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...
+        self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None
     ) -> NDArray[long]: ...
     @overload
-    def zipf(self, a: float, size: None = ...) -> int: ...  # type: ignore[misc]
+    def zipf(self, a: float, size: None = None) -> int: ...
     @overload
     def zipf(
-        self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
+        self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[long]: ...
     @overload
-    def geometric(self, p: float, size: None = ...) -> int: ...  # type: ignore[misc]
+    def geometric(self, p: float, size: None = None) -> int: ...
     @overload
     def geometric(
-        self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
+        self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[long]: ...
     @overload
     def hypergeometric(
-        self, ngood: int, nbad: int, nsample: int, size: None = ...
-    ) -> int: ...  # type: ignore[misc]
+        self, ngood: int, nbad: int, nsample: int, size: None = None
+    ) -> int: ...
     @overload
     def hypergeometric(
         self,
         ngood: _ArrayLikeInt_co,
         nbad: _ArrayLikeInt_co,
         nsample: _ArrayLikeInt_co,
-        size: _ShapeLike | None = ...,
+        size: _ShapeLike | None = None,
     ) -> NDArray[long]: ...
     @overload
-    def logseries(self, p: float, size: None = ...) -> int: ...  # type: ignore[misc]
+    def logseries(self, p: float, size: None = None) -> int: ...
     @overload
     def logseries(
-        self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
+        self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[long]: ...
     def multivariate_normal(
         self,
         mean: _ArrayLikeFloat_co,
         cov: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = ...,
-        check_valid: Literal["warn", "raise", "ignore"] = ...,
-        tol: float = ...,
+        size: _ShapeLike | None = None,
+        check_valid: Literal["warn", "raise", "ignore"] = "warn",
+        tol: float = 1e-8,
     ) -> NDArray[float64]: ...
     def multinomial(
         self,
         n: _ArrayLikeInt_co,
         pvals: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = ...
+        size: _ShapeLike | None = None
     ) -> NDArray[long]: ...
     def dirichlet(
-        self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
+        self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None
     ) -> NDArray[float64]: ...
     def shuffle(self, x: ArrayLike) -> None: ...
     @overload
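`multivariate_normal` is the one signature above with non-trivial defaults: `check_valid="warn"` and `tol=1e-8` control how a non-positive-semidefinite covariance is handled. A usage sketch of those parameters (mine, not from the patch; the behavior matches the tests later in this diff):

```python
import numpy as np

rs = np.random.RandomState(0)
cov = [[1.0, 2.0], [2.0, 1.0]]  # not positive semidefinite

# check_valid="warn" (the default) emits a RuntimeWarning for this cov;
# "raise" turns it into a ValueError; "ignore" skips the check entirely.
try:
    rs.multivariate_normal([0.0, 0.0], cov, check_valid="raise")
except ValueError:
    pass
```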
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index beaf96c06921..c69284d0df9a 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -119,8 +119,8 @@ cdef object int64_to_long(object x):
 
 cdef class RandomState:
-    """
-    RandomState(seed=None)
+    # the first line is used to populate `__text_signature__`
+    """RandomState(seed=None)\n--
 
     Container for the slow Mersenne Twister pseudo-random number generator.
 
     Consider using a different BitGenerator with the Generator container
@@ -222,12 +222,13 @@ cdef class RandomState:
                             "be instantized.")
         self._bitgen = (<bitgen_t *> PyCapsule_GetPointer(capsule, name))[0]
         self._aug_state.bit_generator = &self._bitgen
-        self._reset_gauss()
         self.lock = bit_generator.lock
+        self._reset_gauss()
 
     cdef _reset_gauss(self):
-        self._aug_state.has_gauss = 0
-        self._aug_state.gauss = 0.0
+        with self.lock:
+            self._aug_state.has_gauss = 0
+            self._aug_state.gauss = 0.0
 
     def seed(self, seed=None):
         """
@@ -251,8 +252,9 @@ cdef class RandomState:
         """
         if not isinstance(self._bit_generator, _MT19937):
             raise TypeError('can only re-seed a MT19937 BitGenerator')
-        self._bit_generator._legacy_seeding(seed)
-        self._reset_gauss()
+        with self.lock:
+            self._bit_generator._legacy_seeding(seed)
+            self._reset_gauss()
 
     def get_state(self, legacy=True):
         """
@@ -300,8 +302,9 @@ cdef class RandomState:
                           'MT19937 BitGenerator. To silence this warning, '
                           'set `legacy` to False.', RuntimeWarning)
             legacy = False
-        st['has_gauss'] = self._aug_state.has_gauss
-        st['gauss'] = self._aug_state.gauss
+        with self.lock:
+            st['has_gauss'] = self._aug_state.has_gauss
+            st['gauss'] = self._aug_state.gauss
         if legacy and not isinstance(self._bit_generator, _MT19937):
             raise ValueError(
                 "legacy can only be True when the underlying bitgenerator is "
@@ -380,11 +383,14 @@ cdef class RandomState:
             if len(state) > 3:
                 st['has_gauss'] = state[3]
                 st['gauss'] = state[4]
-            value = st
 
-        self._aug_state.gauss = st.get('gauss', 0.0)
-        self._aug_state.has_gauss = st.get('has_gauss', 0)
-        self._bit_generator.state = st
+        cdef double gauss = st.get('gauss', 0.0)
+        cdef int has_gauss = st.get('has_gauss', 0)
+
+        with self.lock:
+            self._aug_state.gauss = gauss
+            self._aug_state.has_gauss = has_gauss
+            self._bit_generator.state = st
 
     def random_sample(self, size=None):
         """
@@ -437,7 +443,6 @@ cdef class RandomState:
                [-1.23204345, -1.75224494]])
 
         """
-        cdef double temp
         return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, None)
 
     def random(self, size=None):
@@ -544,16 +549,16 @@ cdef class RandomState:
 
         Examples
        --------
-        A real world example: Assume a company has 10000 customer support 
+        A real world example: Assume a company has 10000 customer support
         agents and the average time between customer calls is 4 minutes.
 
         >>> n = 10000
        >>> time_between_calls = np.random.default_rng().exponential(scale=4, size=n)
 
-        What is the probability that a customer will call in the next 
-        4 to 5 minutes? 
-        
-        >>> x = ((time_between_calls < 5).sum())/n 
+        What is the probability that a customer will call in the next
+        4 to 5 minutes?
+
+        >>> x = ((time_between_calls < 5).sum())/n
         >>> y = ((time_between_calls < 4).sum())/n
         >>> x-y
         0.08  # may vary
@@ -1089,9 +1094,9 @@ cdef class RandomState:
             greater than or equal to low.  The default value is 0.
         high : float or array_like of floats
             Upper boundary of the output interval.  All values generated will be
-            less than or equal to high.  The high limit may be included in the 
-            returned array of floats due to floating-point rounding in the 
-            equation ``low + (high-low) * random_sample()``.  The default value 
+            less than or equal to high.  The high limit may be included in the
+            returned array of floats due to floating-point rounding in the
+            equation ``low + (high-low) * random_sample()``.  The default value
             is 1.0.
         size : int or tuple of ints, optional
            Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
@@ -1159,7 +1164,6 @@ cdef class RandomState:
         >>> plt.show()
 
         """
-        cdef bint is_scalar = True
         cdef np.ndarray alow, ahigh, arange
         cdef double _low, _high, range
         cdef object temp
@@ -2229,14 +2233,14 @@ cdef class RandomState:
         Does their energy intake deviate systematically from the recommended
         value of 7725 kJ? Our null hypothesis will be the absence of deviation,
         and the alternate hypothesis will be the presence of an effect that could be
-        either positive or negative, hence making our test 2-tailed. 
+        either positive or negative, hence making our test 2-tailed.
 
         Because we are estimating the mean and we have N=11 values in our sample,
-        we have N-1=10 degrees of freedom. We set our significance level to 95% and 
-        compute the t statistic using the empirical mean and empirical standard 
-        deviation of our intake. We use a ddof of 1 to base the computation of our 
+        we have N-1=10 degrees of freedom. We set our significance level to 95% and
+        compute the t statistic using the empirical mean and empirical standard
+        deviation of our intake. We use a ddof of 1 to base the computation of our
         empirical standard deviation on an unbiased estimate of the variance (note:
-        the final estimate is not unbiased due to the concave nature of the square 
+        the final estimate is not unbiased due to the concave nature of the square
         root).
 
         >>> np.mean(intake)
@@ -2254,18 +2258,18 @@ cdef class RandomState:
         >>> s = np.random.standard_t(10, size=1000000)
         >>> h = plt.hist(s, bins=100, density=True)
 
-        Does our t statistic land in one of the two critical regions found at 
+        Does our t statistic land in one of the two critical regions found at
         both tails of the distribution?
 
         >>> np.sum(np.abs(t) < np.abs(s)) / float(len(s))
         0.018318  #random < 0.05, statistic is in critical region
 
-        The probability value for this 2-tailed test is about 1.83%, which is 
-        lower than the 5% pre-determined significance threshold. 
+        The probability value for this 2-tailed test is about 1.83%, which is
+        lower than the 5% pre-determined significance threshold.
 
         Therefore, the probability of observing values as extreme as our intake
-        conditionally on the null hypothesis being true is too low, and we reject 
-        the null hypothesis of no deviation. 
+        conditionally on the null hypothesis being true is too low, and we reject
+        the null hypothesis of no deviation.
 
         """
         return cont(&legacy_standard_t, &self._aug_state, size, self.lock, 1,
@@ -3325,7 +3329,6 @@ cdef class RandomState:
         >>> plt.show()
 
         """
-        cdef bint is_scalar = True
         cdef double fleft, fmode, fright
         cdef np.ndarray oleft, omode, oright
 
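The locking hunks earlier in this file all protect the same invariant: the cached second Gaussian (`has_gauss`/`gauss`) and the bit generator state must be read and written together under `self.lock`, otherwise concurrent callers can observe a half-updated pair. A sketch of the state round-trip this guarantees (mine; it mirrors the `test_gaussian_reset_in_media_res` tests later in this patch):

```python
import numpy as np

rs = np.random.RandomState(12345)
rs.standard_normal()    # leaves a cached second Gaussian behind
state = rs.get_state()  # state and cached Gaussian now read under the lock
a = rs.standard_normal(3)
rs.set_state(state)     # restores bit generator and cached Gaussian together
b = rs.standard_normal(3)
assert (a == b).all()
```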
""" - cdef bint is_scalar = True cdef np.ndarray ongood, onbad, onsample cdef int64_t lngood, lnbad, lnsample @@ -4147,7 +4149,8 @@ cdef class RandomState: >>> mean = [0, 0] >>> cov = [[1, 0], [0, 100]] # diagonal covariance - Diagonal covariance means that points are oriented along x or y-axis: + Diagonal covariance means that the variables are independent, and the + probability density contours have their axes aligned with the coordinate axes: >>> import matplotlib.pyplot as plt >>> x, y = np.random.multivariate_normal(mean, cov, 5000).T @@ -4247,7 +4250,7 @@ cdef class RandomState: # GH10839, ensure double to make tol meaningful cov = cov.astype(np.double) - (u, s, v) = svd(cov) + (_u, s, v) = svd(cov) if check_valid != 'ignore': if check_valid != 'warn' and check_valid != 'raise': diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index aa81a4a173d4..79cacb2df4a4 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -468,12 +468,15 @@ double random_chisquare(bitgen_t *bitgen_state, double df) { } double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) { - return ((random_chisquare(bitgen_state, dfnum) * dfden) / - (random_chisquare(bitgen_state, dfden) * dfnum)); + double subexpr1 = random_chisquare(bitgen_state, dfnum) * dfden; + double subexpr2 = random_chisquare(bitgen_state, dfden) * dfnum; + return subexpr1 / subexpr2; } double random_standard_cauchy(bitgen_t *bitgen_state) { - return random_standard_normal(bitgen_state) / random_standard_normal(bitgen_state); + double subexpr1 = random_standard_normal(bitgen_state); + double subexpr2 = random_standard_normal(bitgen_state); + return subexpr1 / subexpr2; } double random_pareto(bitgen_t *bitgen_state, double a) { @@ -594,7 +597,7 @@ static RAND_INT_TYPE random_poisson_ptrs(bitgen_t *bitgen_state, double lam) { /* log(V) == log(0.0) ok here */ /* if U==0.0 so that us==0.0, log is ok since always returns */ if ((log(V) + log(invalpha) - log(a / (us * us) + b)) <= - (-lam + k * loglam - random_loggam(k + 1))) { + (-lam + (double)k * loglam - random_loggam((double)k + 1))) { return k; } } @@ -730,10 +733,10 @@ RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, RAND_INT_TYPE n, if (A > (t + rho)) goto Step10; - x1 = y + 1; - f1 = m + 1; - z = n + 1 - m; - w = n - y + 1; + x1 = (double)y + 1; + f1 = (double)m + 1; + z = (double)n + 1 - (double)m; + w = (double)n - (double)y + 1; x2 = x1 * x1; f2 = f1 * f1; z2 = z * z; @@ -770,7 +773,7 @@ RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, RAND_INT_TYPE n, binomial->psave = p; binomial->has_binomial = 1; binomial->q = q = 1.0 - p; - binomial->r = qn = exp(n * log(q)); + binomial->r = qn = exp(n * log1p(-p)); binomial->c = np = n * p; binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1)); } else { @@ -845,12 +848,12 @@ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, double dfden, double random_wald(bitgen_t *bitgen_state, double mean, double scale) { double U, X, Y; - double mu_2l; + double d; - mu_2l = mean / (2 * scale); Y = random_standard_normal(bitgen_state); Y = mean * Y * Y; - X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y)); + d = 1 + sqrt(1 + 4 * scale / Y); + X = mean * (1 - 2 / d); U = next_double(bitgen_state); if (U <= mean / (mean + X)) { return X; diff --git a/numpy/random/src/distributions/logfactorial.c b/numpy/random/src/distributions/logfactorial.c index 1305164699fa..337ec1a98db5 
diff --git a/numpy/random/src/distributions/logfactorial.c b/numpy/random/src/distributions/logfactorial.c
index 1305164699fa..337ec1a98db5 100644
--- a/numpy/random/src/distributions/logfactorial.c
+++ b/numpy/random/src/distributions/logfactorial.c
@@ -154,5 +154,5 @@ double logfactorial(int64_t k)
    *   was within 2 ULP of the best 64 bit floating point value for
    *   k up to 10000000.)
    */
-  return (k + 0.5)*log(k) - k + (halfln2pi + (1.0/k)*(1/12.0 - 1/(360.0*k*k)));
+  return (k + 0.5)*log((double)k) - k + (halfln2pi + (1.0/k)*(1/12.0 - 1/(360.0*k*k)));
 }
diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c
index 14d9ce25f255..e84bd19fdaee 100644
--- a/numpy/random/src/legacy/legacy-distributions.c
+++ b/numpy/random/src/legacy/legacy-distributions.c
@@ -228,6 +228,44 @@ double legacy_exponential(aug_bitgen_t *aug_state, double scale) {
   return scale * legacy_standard_exponential(aug_state);
 }
 
+static RAND_INT_TYPE legacy_random_binomial_inversion(
+    bitgen_t *bitgen_state, RAND_INT_TYPE n, double p, binomial_t *binomial
+)
+{
+  double q, qn, np, px, U;
+  RAND_INT_TYPE X, bound;
+
+  if (!(binomial->has_binomial) || (binomial->nsave != n) ||
+      (binomial->psave != p)) {
+    binomial->nsave = n;
+    binomial->psave = p;
+    binomial->has_binomial = 1;
+    binomial->q = q = 1.0 - p;
+    binomial->r = qn = exp(n * log(q));
+    binomial->c = np = n * p;
+    binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1));
+  } else {
+    q = binomial->q;
+    qn = binomial->r;
+    np = binomial->c;
+    bound = binomial->m;
+  }
+  X = 0;
+  px = qn;
+  U = next_double(bitgen_state);
+  while (U > px) {
+    X++;
+    if (X > bound) {
+      X = 0;
+      px = qn;
+      U = next_double(bitgen_state);
+    } else {
+      U -= px;
+      px = ((n - X + 1) * p * px) / (X * q);
+    }
+  }
+  return X;
+}
 
 static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state,
                                                      double p,
@@ -237,14 +275,14 @@ static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state,
 
   if (p <= 0.5) {
     if (p * n <= 30.0) {
-      return random_binomial_inversion(bitgen_state, n, p, binomial);
+      return legacy_random_binomial_inversion(bitgen_state, n, p, binomial);
     } else {
      return random_binomial_btpe(bitgen_state, n, p, binomial);
     }
   } else {
     q = 1.0 - p;
     if (q * n <= 30.0) {
-      return n - random_binomial_inversion(bitgen_state, n, q, binomial);
+      return n - legacy_random_binomial_inversion(bitgen_state, n, q, binomial);
     } else {
       return n - random_binomial_btpe(bitgen_state, n, q, binomial);
     }
@@ -431,7 +469,7 @@ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) {
 void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n,
                                RAND_INT_TYPE *mnix, double *pix, npy_intp d,
                                binomial_t *binomial) {
-  return random_multinomial(bitgen_state, n, mnix, pix, d, binomial);
+  random_multinomial(bitgen_state, n, mnix, pix, d, binomial);
 }
 
 double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa) {
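The duplication above is deliberate: `random_binomial_inversion` switched to `log1p(-p)` so the `Generator` path works for extremely small `p`, while the legacy copy freezes the old `exp(n * log(q))` so `RandomState` streams are unchanged. A small demonstration of why `log1p` matters (my sketch, using the same parameters as the `test_p_extremely_small` test added later in this patch):

```python
import math

# For p below ~1e-16, 1.0 - p rounds to exactly 1.0 in double precision,
# so the old qn = exp(n * log(1 - p)) collapses to 1.0 and the inversion
# sampler would always return 0.
n, p = 50_000_000_000, 5e-17

print(math.log(1.0 - p))             # 0.0 -- the tiny p is lost
print(math.log1p(-p))                # -5e-17 -- preserved
print(math.exp(n * math.log1p(-p)))  # ~0.9999975, the true P(X == 0)
```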
diff --git a/numpy/random/src/mt19937/randomkit.c b/numpy/random/src/mt19937/randomkit.c
index 32f40fa49cc1..21d270234c9a 100644
--- a/numpy/random/src/mt19937/randomkit.c
+++ b/numpy/random/src/mt19937/randomkit.c
@@ -62,6 +62,8 @@
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

+#include
+
/* static char const rcsid[] =
  "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */

diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py
index 6f069e48879f..9916f8ad3440 100644
--- a/numpy/random/tests/test_direct.py
+++ b/numpy/random/tests/test_direct.py
@@ -572,6 +572,9 @@ def test_passthrough(self):
         assert rg2 is rg
         assert rg2.bit_generator is bg
 
+    @pytest.mark.thread_unsafe(
+        reason="np.random.set_bit_generator affects global state"
+    )
     def test_coercion_RandomState_Generator(self):
         # use default_rng to coerce RandomState to Generator
         rs = RandomState(1234)
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
index 7a079d6362e8..a1e64ecbe343 100644
--- a/numpy/random/tests/test_extending.py
+++ b/numpy/random/tests/test_extending.py
@@ -57,6 +57,10 @@
 @pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64',
                     reason='Meson unable to find MSVC linker on win-arm64')
 @pytest.mark.slow
+@pytest.mark.thread_unsafe(
+    reason="building cython code in a subprocess doesn't make sense to do in many "
+           "threads and sometimes crashes"
+)
 def test_cython(tmp_path):
     import glob
     # build the examples in a temporary directory
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index d09cbba4ec39..7d13c49149b3 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -1,6 +1,7 @@
 import hashlib
 import os.path
 import sys
+import warnings
 
 import pytest
 
@@ -17,8 +18,6 @@
     assert_equal,
     assert_no_warnings,
     assert_raises,
-    assert_warns,
-    suppress_warnings,
 )
 
 random = Generator(MT19937())
@@ -100,6 +99,24 @@ def test_p_is_nan(self):
         # Issue #4571.
         assert_raises(ValueError, random.binomial, 1, np.nan)
 
+    def test_p_extremely_small(self):
+        n = 50000000000
+        p = 5e-17
+        sample_size = 20000000
+        x = random.binomial(n, p, size=sample_size)
+        sample_mean = x.mean()
+        expected_mean = n * p
+        sigma = np.sqrt(n * p * (1 - p) / sample_size)
+        # Note: the parameters were chosen so that expected_mean - 6*sigma
+        # is a positive value.  The first `assert` below validates that
+        # assumption (in case someone edits the parameters in the future).
+        # The second `assert` is the actual test.
+        low_bound = expected_mean - 6 * sigma
+        assert low_bound > 0, "bad test params: 6-sigma lower bound is negative"
+        test_msg = (f"sample mean {sample_mean} deviates from the expected mean "
+                    f"{expected_mean} by more than 6*sigma")
+        assert abs(expected_mean - sample_mean) < 6 * sigma, test_msg
+
 
 class TestMultinomial:
     def test_basic(self):
@@ -158,8 +175,7 @@ def test_multinomial_pvals_float32(self):
 
 
 class TestMultivariateHypergeometric:
-    def setup_method(self):
-        self.seed = 8675309
+    seed = 8675309
 
     def test_argument_validation(self):
         # Error cases...
@@ -291,37 +307,40 @@ def test_repeatability3(self):
 
 
 class TestSetState:
-    def setup_method(self):
-        self.seed = 1234567890
-        self.rg = Generator(MT19937(self.seed))
-        self.bit_generator = self.rg.bit_generator
-        self.state = self.bit_generator.state
-        self.legacy_state = (self.state['bit_generator'],
-                             self.state['state']['key'],
-                             self.state['state']['pos'])
+    def _create_rng(self):
+        seed = 1234567890
+        rg = Generator(MT19937(seed))
+        bit_generator = rg.bit_generator
+        state = bit_generator.state
+        legacy_state = (state['bit_generator'],
+                        state['state']['key'],
+                        state['state']['pos'])
+        return rg, bit_generator, state
 
     def test_gaussian_reset(self):
         # Make sure the cached every-other-Gaussian is reset.
-        old = self.rg.standard_normal(size=3)
-        self.bit_generator.state = self.state
-        new = self.rg.standard_normal(size=3)
+        rg, bit_generator, state = self._create_rng()
+        old = rg.standard_normal(size=3)
+        bit_generator.state = state
+        new = rg.standard_normal(size=3)
         assert_(np.all(old == new))
 
     def test_gaussian_reset_in_media_res(self):
         # When the state is saved with a cached Gaussian, make sure the
         # cached Gaussian is restored.
-
-        self.rg.standard_normal()
-        state = self.bit_generator.state
-        old = self.rg.standard_normal(size=3)
-        self.bit_generator.state = state
-        new = self.rg.standard_normal(size=3)
+        rg, bit_generator, state = self._create_rng()
+        rg.standard_normal()
+        state = bit_generator.state
+        old = rg.standard_normal(size=3)
+        bit_generator.state = state
+        new = rg.standard_normal(size=3)
         assert_(np.all(old == new))
 
     def test_negative_binomial(self):
         # Ensure that the negative binomial results take floating point
         # arguments without truncation.
-        self.rg.negative_binomial(0.5, 0.5)
+        rg, _, _ = self._create_rng()
+        rg.negative_binomial(0.5, 0.5)
 
 
 class TestIntegers:
@@ -719,9 +738,7 @@ def test_integers_small_dtype_chisquared(self, sample_size, high,
 class TestRandomDist:
     # Make sure the random distribution returns the correct value for a
     # given seed
-
-    def setup_method(self):
-        self.seed = 1234567890
+    seed = 1234567890
 
     def test_integers(self):
         random = Generator(MT19937(self.seed))
@@ -1249,6 +1266,7 @@ def test_dirichlet_small_alpha(self):
         assert_array_almost_equal(actual, expected, decimal=15)
 
     @pytest.mark.slow
+    @pytest.mark.thread_unsafe(reason="crashes with low memory")
     def test_dirichlet_moderately_small_alpha(self):
         # Use alpha.max() < 0.1 to trigger stick breaking code path
         alpha = np.array([0.02, 0.04, 0.03])
@@ -1463,8 +1481,8 @@ def test_multivariate_normal(self, method):
         # Check that non positive-semidefinite covariance warns with
         # RuntimeWarning
         cov = [[1, 2], [2, 1]]
-        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
-        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
+        pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+        pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov,
                      method='eigh')
         assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                       method='cholesky')
@@ -1491,10 +1509,9 @@ def test_multivariate_normal(self, method):
                       method='cholesky')
 
         cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
-        with suppress_warnings() as sup:
+        with warnings.catch_warnings():
+            warnings.simplefilter("error")
             random.multivariate_normal(mean, cov, method=method)
-            w = sup.record(RuntimeWarning)
-            assert len(w) == 0
 
         mu = np.zeros(2)
         cov = np.eye(2)
@@ -1858,6 +1875,11 @@ def test_wald(self):
                             [2.07093587449261, 0.73073890064369]])
         assert_array_almost_equal(actual, desired, decimal=14)
 
+    def test_wald_nonnegative(self):
+        random = Generator(MT19937(self.seed))
+        samples = random.wald(mean=1e9, scale=2.25, size=1000)
+        assert_(np.all(samples >= 0.0))
+
     def test_weibull(self):
         random = Generator(MT19937(self.seed))
         actual = random.weibull(a=1.23, size=(3, 2))
@@ -1883,8 +1905,7 @@ def test_zipf(self):
 class TestBroadcast:
     # tests that functions that broadcast behave
     # correctly when presented with non-scalar arguments
-    def setup_method(self):
-        self.seed = 123456789
+    seed = 123456789
 
     def test_uniform(self):
         random = Generator(MT19937(self.seed))
@@ -2496,8 +2517,7 @@ def test_empty_outputs(self):
 @pytest.mark.skipif(IS_WASM, reason="can't start thread")
 class TestThread:
     # make sure each state produces the same sequence even in threads
-    def setup_method(self):
-        self.seeds = range(4)
+    seeds = range(4)
 
     def check_function(self, function, sz):
         from threading import Thread
@@ -2542,13 +2562,11 @@ def gen_random(state, out):
 
 # See Issue #4263
 class TestSingleEltArrayInput:
-    def setup_method(self):
-        self.argOne = np.array([2])
-        self.argTwo = np.array([3])
-        self.argThree = np.array([4])
-        self.tgtShape = (1,)
+    def _create_arrays(self):
+        return np.array([2]), np.array([3]), np.array([4]), (1,)
 
     def test_one_arg_funcs(self):
+        argOne, _, _, tgtShape = self._create_arrays()
         funcs = (random.exponential, random.standard_gamma,
                  random.chisquare, random.standard_t,
                  random.pareto, random.weibull,
@@ -2563,11 +2581,12 @@ def test_one_arg_funcs(self):
                 out = func(np.array([0.5]))
 
             else:
-                out = func(self.argOne)
+                out = func(argOne)
 
-            assert_equal(out.shape, self.tgtShape)
+            assert_equal(out.shape, tgtShape)
 
     def test_two_arg_funcs(self):
+        argOne, argTwo, _, tgtShape = self._create_arrays()
         funcs = (random.uniform, random.normal,
                  random.beta, random.gamma,
                  random.f, random.noncentral_chisquare,
@@ -2583,18 +2602,19 @@ def test_two_arg_funcs(self):
                 argTwo = np.array([0.5])
 
             else:
-                argTwo = self.argTwo
+                argTwo = argTwo
 
-            out = func(self.argOne, argTwo)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo)
+            assert_equal(out.shape, tgtShape)
 
-            out = func(self.argOne[0], argTwo)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne[0], argTwo)
+            assert_equal(out.shape, tgtShape)
 
-            out = func(self.argOne, argTwo[0])
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo[0])
+            assert_equal(out.shape, tgtShape)
 
     def test_integers(self, endpoint):
+        _, _, _, tgtShape = self._create_arrays()
         itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
                  np.int32, np.uint32, np.int64, np.uint64]
         func = random.integers
@@ -2603,27 +2623,28 @@ def test_integers(self, endpoint):
 
         for dt in itype:
             out = func(low, high, endpoint=endpoint, dtype=dt)
-            assert_equal(out.shape, self.tgtShape)
+            assert_equal(out.shape, tgtShape)
 
             out = func(low[0], high, endpoint=endpoint, dtype=dt)
-            assert_equal(out.shape, self.tgtShape)
+            assert_equal(out.shape, tgtShape)
 
             out = func(low, high[0], endpoint=endpoint, dtype=dt)
-            assert_equal(out.shape, self.tgtShape)
+            assert_equal(out.shape, tgtShape)
 
     def test_three_arg_funcs(self):
+        argOne, argTwo, argThree, tgtShape = self._create_arrays()
         funcs = [random.noncentral_f, random.triangular,
                  random.hypergeometric]
 
         for func in funcs:
-            out = func(self.argOne, self.argTwo, self.argThree)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo, argThree)
+            assert_equal(out.shape, tgtShape)
 
-            out = func(self.argOne[0], self.argTwo, self.argThree)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne[0], argTwo, argThree)
+            assert_equal(out.shape, tgtShape)
 
-            out = func(self.argOne, self.argTwo[0], self.argThree)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo[0], argThree)
+            assert_equal(out.shape, tgtShape)
 
 
 @pytest.mark.parametrize("config", JUMP_TEST_DATA)
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index abfacb87dbc5..21093ef73eb6 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -6,30 +6,32 @@
 
 
 class TestRegression:
-
-    def setup_method(self):
-        self.mt19937 = Generator(MT19937(121263137472525314065))
+    def _create_generator(self):
+        return Generator(MT19937(121263137472525314065))
 
     def test_vonmises_range(self):
         # Make sure generated random variables are in [-pi, pi].
         # Regression test for ticket #986.
+        mt19937 = self._create_generator()
         for mu in np.linspace(-7., 7., 5):
-            r = self.mt19937.vonmises(mu, 1, 50)
+            r = mt19937.vonmises(mu, 1, 50)
             assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
 
     def test_hypergeometric_range(self):
         # Test for ticket #921
-        assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4))
-        assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0))
+        mt19937 = self._create_generator()
+        assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4))
+        assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0))
 
         # Test for ticket #5623
         args = (2**20 - 2, 2**20 - 2, 2**20 - 2)  # Check for 32-bit systems
-        assert_(self.mt19937.hypergeometric(*args) > 0)
+        assert_(mt19937.hypergeometric(*args) > 0)
 
     def test_logseries_convergence(self):
         # Test for ticket #923
+        mt19937 = self._create_generator()
         N = 1000
-        rvsn = self.mt19937.logseries(0.8, size=N)
+        rvsn = mt19937.logseries(0.8, size=N)
         # these two frequency counts should be close to theoretical
         # numbers with this large sample
         # theoretical large N result is 0.49706795
@@ -66,34 +68,39 @@ def test_multivariate_normal_size_types(self):
         # Test for multivariate_normal issue with 'size' argument.
         # Check that the multivariate_normal size argument can be a
         # numpy integer.
-        self.mt19937.multivariate_normal([0], [[0]], size=1)
-        self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
-        self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
+        mt19937 = self._create_generator()
+        mt19937.multivariate_normal([0], [[0]], size=1)
+        mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
+        mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
 
     def test_beta_small_parameters(self):
         # Test that beta with small a and b parameters does not produce
         # NaNs due to roundoff errors causing 0 / 0, gh-5851
-        x = self.mt19937.beta(0.0001, 0.0001, size=100)
+        mt19937 = self._create_generator()
+        x = mt19937.beta(0.0001, 0.0001, size=100)
         assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
 
     def test_beta_very_small_parameters(self):
         # gh-24203: beta would hang with very small parameters.
-        self.mt19937.beta(1e-49, 1e-40)
+        mt19937 = self._create_generator()
+        mt19937.beta(1e-49, 1e-40)
 
     def test_beta_ridiculously_small_parameters(self):
         # gh-24266: beta would generate nan when the parameters
         # were subnormal or a small multiple of the smallest normal.
+        mt19937 = self._create_generator()
         tiny = np.finfo(1.0).tiny
-        x = self.mt19937.beta(tiny / 32, tiny / 40, size=50)
+        x = mt19937.beta(tiny / 32, tiny / 40, size=50)
         assert not np.any(np.isnan(x))
 
     def test_beta_expected_zero_frequency(self):
         # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta
         # would generate too many zeros.
+        mt19937 = self._create_generator()
         a = 0.0025
         b = 0.0025
         n = 1000000
-        x = self.mt19937.beta(a, b, size=n)
+        x = mt19937.beta(a, b, size=n)
         nzeros = np.count_nonzero(x == 0)
         # beta CDF at x = np.finfo(np.double).smallest_subnormal/2
         # is p = 0.0776169083131899, e.g,
@@ -114,24 +121,26 @@ def test_choice_sum_of_probs_tolerance(self):
         # The sum of probs should be 1.0 with some tolerance.
         # For low precision dtypes the tolerance was too tight.
         # See numpy github issue 6123.
+        mt19937 = self._create_generator()
         a = [1, 2, 3]
         counts = [4, 4, 2]
         for dt in np.float16, np.float32, np.float64:
             probs = np.array(counts, dtype=dt) / sum(counts)
-            c = self.mt19937.choice(a, p=probs)
+            c = mt19937.choice(a, p=probs)
             assert_(c in a)
             with pytest.raises(ValueError):
-                self.mt19937.choice(a, p=probs * 0.9)
+                mt19937.choice(a, p=probs * 0.9)
 
     def test_shuffle_of_array_of_different_length_strings(self):
         # Test that permuting an array of different length strings
         # will not cause a segfault on garbage collection
         # Tests gh-7710
+        mt19937 = self._create_generator()
         a = np.array(['a', 'a' * 1000])
 
         for _ in range(100):
-            self.mt19937.shuffle(a)
+            mt19937.shuffle(a)
 
         # Force Garbage Collection - should not segfault.
         import gc
@@ -141,10 +150,11 @@ def test_shuffle_of_array_of_objects(self):
         # Test that permuting an array of objects will not cause
         # a segfault on garbage collection.
         # See gh-7719
+        mt19937 = self._create_generator()
         a = np.array([np.arange(1), np.arange(4)], dtype=object)
 
         for _ in range(1000):
-            self.mt19937.shuffle(a)
+            mt19937.shuffle(a)
 
         # Force Garbage Collection - should not segfault.
         import gc
@@ -174,10 +184,11 @@ def __array__(self, dtype=None, copy=None):
         assert_array_equal(m.__array__(), np.arange(5))
 
     def test_gamma_0(self):
-        assert self.mt19937.standard_gamma(0.0) == 0.0
-        assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0)
+        mt19937 = self._create_generator()
+        assert mt19937.standard_gamma(0.0) == 0.0
+        assert_array_equal(mt19937.standard_gamma([0.0]), 0.0)
 
-        actual = self.mt19937.standard_gamma([0.0], dtype='float')
+        actual = mt19937.standard_gamma([0.0], dtype='float')
         expected = np.array([0.], dtype=np.float32)
         assert_array_equal(actual, expected)
 
@@ -185,21 +196,24 @@ def test_geometric_tiny_prob(self):
         # Regression test for gh-17007.
         # When p = 1e-30, the probability that a sample will exceed 2**63-1
         # is 0.9999999999907766, so we expect the result to be all 2**63-1.
-        assert_array_equal(self.mt19937.geometric(p=1e-30, size=3),
+        mt19937 = self._create_generator()
+        assert_array_equal(mt19937.geometric(p=1e-30, size=3),
                            np.iinfo(np.int64).max)
 
     def test_zipf_large_parameter(self):
         # Regression test for part of gh-9829: a call such as rng.zipf(10000)
         # would hang.
+        mt19937 = self._create_generator()
         n = 8
-        sample = self.mt19937.zipf(10000, size=n)
+        sample = mt19937.zipf(10000, size=n)
         assert_array_equal(sample, np.ones(n, dtype=np.int64))
 
     def test_zipf_a_near_1(self):
         # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001)
         # would hang.
+        mt19937 = self._create_generator()
         n = 100000
-        sample = self.mt19937.zipf(1.0000000000001, size=n)
+        sample = mt19937.zipf(1.0000000000001, size=n)
         # Not much of a test, but let's do something more than verify that
         # it doesn't hang.  Certainly for a monotonically decreasing
         # discrete distribution truncated to signed 64 bit integers, more
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index d5981906f6ef..f110aa892b31 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -13,8 +13,6 @@
     assert_equal,
     assert_no_warnings,
     assert_raises,
-    assert_warns,
-    suppress_warnings,
 )
 
 
@@ -107,101 +105,109 @@ def test_multidimensional_pvals(self):
 
 
 class TestSetState:
-    def setup_method(self):
-        self.seed = 1234567890
-        self.prng = random.RandomState(self.seed)
-        self.state = self.prng.get_state()
+    def _create_rng(self):
+        seed = 1234567890
+        prng = random.RandomState(seed)
+        state = prng.get_state()
+        return prng, state
 
     def test_basic(self):
-        old = self.prng.tomaxint(16)
-        self.prng.set_state(self.state)
-        new = self.prng.tomaxint(16)
+        prng, state = self._create_rng()
+        old = prng.tomaxint(16)
+        prng.set_state(state)
+        new = prng.tomaxint(16)
         assert_(np.all(old == new))
 
     def test_gaussian_reset(self):
         # Make sure the cached every-other-Gaussian is reset.
-        old = self.prng.standard_normal(size=3)
-        self.prng.set_state(self.state)
-        new = self.prng.standard_normal(size=3)
+        prng, state = self._create_rng()
+        old = prng.standard_normal(size=3)
+        prng.set_state(state)
+        new = prng.standard_normal(size=3)
         assert_(np.all(old == new))
 
     def test_gaussian_reset_in_media_res(self):
         # When the state is saved with a cached Gaussian, make sure the
         # cached Gaussian is restored.
-
-        self.prng.standard_normal()
-        state = self.prng.get_state()
-        old = self.prng.standard_normal(size=3)
-        self.prng.set_state(state)
-        new = self.prng.standard_normal(size=3)
+        prng, state = self._create_rng()
+        prng.standard_normal()
+        state = prng.get_state()
+        old = prng.standard_normal(size=3)
+        prng.set_state(state)
+        new = prng.standard_normal(size=3)
         assert_(np.all(old == new))
 
     def test_backwards_compatibility(self):
         # Make sure we can accept old state tuples that do not have the
         # cached Gaussian value.
-        old_state = self.state[:-2]
-        x1 = self.prng.standard_normal(size=16)
-        self.prng.set_state(old_state)
-        x2 = self.prng.standard_normal(size=16)
-        self.prng.set_state(self.state)
-        x3 = self.prng.standard_normal(size=16)
+        prng, state = self._create_rng()
+        old_state = state[:-2]
+        x1 = prng.standard_normal(size=16)
+        prng.set_state(old_state)
+        x2 = prng.standard_normal(size=16)
+        prng.set_state(state)
+        x3 = prng.standard_normal(size=16)
         assert_(np.all(x1 == x2))
         assert_(np.all(x1 == x3))
 
     def test_negative_binomial(self):
         # Ensure that the negative binomial results take floating point
         # arguments without truncation.
-        self.prng.negative_binomial(0.5, 0.5)
+        prng, _ = self._create_rng()
+        prng.negative_binomial(0.5, 0.5)
 
     def test_set_invalid_state(self):
         # gh-25402
+        prng, _ = self._create_rng()
         with pytest.raises(IndexError):
-            self.prng.set_state(())
+            prng.set_state(())
 
 
 class TestRandint:
-
-    rfunc = np.random.randint
-
     # valid integer/boolean types
     itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32,
              np.uint32, np.int64, np.uint64]
 
     def test_unsupported_type(self):
-        assert_raises(TypeError, self.rfunc, 1, dtype=float)
+        rng = random.RandomState()
+        assert_raises(TypeError, rng.randint, 1, dtype=float)
 
     def test_bounds_checking(self):
+        rng = random.RandomState()
         for dt in self.itype:
             lbnd = 0 if dt is np.bool else np.iinfo(dt).min
             ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
-            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
-            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
-            assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
-            assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
+            assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt)
+            assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt)
+            assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt)
+            assert_raises(ValueError, rng.randint, 1, 0, dtype=dt)
 
     def test_rng_zero_and_extremes(self):
+        rng = random.RandomState()
         for dt in self.itype:
             lbnd = 0 if dt is np.bool else np.iinfo(dt).min
             ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
 
             tgt = ubnd - 1
-            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+            assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt)
 
             tgt = lbnd
-            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+            assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt)
 
             tgt = (lbnd + ubnd) // 2
-            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+            assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt)
 
     def test_full_range(self):
         # Test for ticket #1690
+        rng = random.RandomState()
         for dt in self.itype:
             lbnd = 0 if dt is np.bool else np.iinfo(dt).min
             ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
 
             try:
-                self.rfunc(lbnd, ubnd, dtype=dt)
+                rng.randint(lbnd, ubnd, dtype=dt)
             except Exception as e:
                 raise AssertionError("No error should have been raised, "
                                      "but one was with the following "
@@ -209,15 +215,15 @@ def test_full_range(self):
 
     def test_in_bounds_fuzz(self):
         # Don't use fixed seed
-        np.random.seed()
+        rng = random.RandomState()
 
         for dt in self.itype[1:]:
             for ubnd in [4, 8, 16]:
-                vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
+                vals = rng.randint(2, ubnd, size=2**16, dtype=dt)
                 assert_(vals.max() < ubnd)
                 assert_(vals.min() >= 2)
 
-        vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
+        vals = rng.randint(0, 2, size=2**16, dtype=np.bool)
 
         assert_(vals.max() < 2)
         assert_(vals.min() >= 0)
@@ -238,20 +244,20 @@ def test_repeatability(self):
               'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}  # noqa: E501
 
         for dt in self.itype[1:]:
-            np.random.seed(1234)
+            rng = random.RandomState(1234)
 
             # view as little endian for hash
             if sys.byteorder == 'little':
-                val = self.rfunc(0, 6, size=1000, dtype=dt)
+                val = rng.randint(0, 6, size=1000, dtype=dt)
             else:
-                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+                val = rng.randint(0, 6, size=1000, dtype=dt).byteswap()
 
             res = hashlib.sha256(val.view(np.int8)).hexdigest()
             assert_(tgt[np.dtype(dt).name] == res)
 
         # bools do not depend on endianness
-        np.random.seed(1234)
-        val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
+        rng = random.RandomState(1234)
+        val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8)
         res = hashlib.sha256(val).hexdigest()
         assert_(tgt[np.dtype(bool).name] == res)
 
@@ -280,11 +286,12 @@ def test_int64_uint64_corner_case(self):
 
     def test_respect_dtype_singleton(self):
         # See gh-7203
+        rng = random.RandomState()
         for dt in self.itype:
             lbnd = 0 if dt is np.bool else np.iinfo(dt).min
             ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
 
-            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            sample = rng.randint(lbnd, ubnd, dtype=dt)
             assert_equal(sample.dtype, np.dtype(dt))
 
         for dt in (bool, int):
@@ -293,7 +300,7 @@ def test_respect_dtype_singleton(self):
             ubnd = 2 if dt is bool else np.iinfo("long").max + 1
 
             # gh-7284: Ensure that we get Python data types
-            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            sample = rng.randint(lbnd, ubnd, dtype=dt)
             assert_(not hasattr(sample, 'dtype'))
             assert_equal(type(sample), dt)
 
@@ -301,40 +308,36 @@ def test_respect_dtype_singleton(self):
 class TestRandomDist:
     # Make sure the random distribution returns the correct value for a
     # given seed
-
-    def setup_method(self):
-        self.seed = 1234567890
+    seed = 1234567890
 
     def test_rand(self):
-        np.random.seed(self.seed)
-        actual = np.random.rand(3, 2)
+        rng = random.RandomState(self.seed)
+        actual = rng.rand(3, 2)
         desired = np.array([[0.61879477158567997, 0.59162362775974664],
                             [0.88868358904449662, 0.89165480011560816],
                             [0.4575674820298663, 0.7781880808593471]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_randn(self):
-        np.random.seed(self.seed)
-        actual = np.random.randn(3, 2)
+        rng = random.RandomState(self.seed)
+        actual = rng.randn(3, 2)
        desired = np.array([[1.34016345771863121, 1.73759122771936081],
                             [1.498988344300628, -0.2286433324536169],
                             [2.031033998682787, 2.17032494605655257]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_randint(self):
-        np.random.seed(self.seed)
-        actual = np.random.randint(-99, 99, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.randint(-99, 99, size=(3, 2))
         desired = np.array([[31, 3],
                             [-52, 41],
                             [-48, -66]])
         assert_array_equal(actual, desired)
 
     def test_random_integers(self):
-        np.random.seed(self.seed)
-        with suppress_warnings() as sup:
-            w = sup.record(DeprecationWarning)
-            actual = np.random.random_integers(-99, 99, size=(3, 2))
-            assert_(len(w) == 1)
+        rng = random.RandomState(self.seed)
+        with pytest.warns(DeprecationWarning):
+            actual = rng.random_integers(-99, 99, size=(3, 2))
         desired = np.array([[31, 3],
                             [-52, 41],
                             [-48, -66]])
@@ -346,11 +349,9 @@ def test_random_integers_max_int(self):
         # into a C long. Previous implementations of this
         # method have thrown an OverflowError when attempting
         # to generate this integer.
-        with suppress_warnings() as sup:
-            w = sup.record(DeprecationWarning)
+        with pytest.warns(DeprecationWarning):
             actual = np.random.random_integers(np.iinfo('l').max,
                                                np.iinfo('l').max)
-            assert_(len(w) == 1)
 
         desired = np.iinfo('l').max
         assert_equal(actual, desired)
@@ -370,41 +371,41 @@ def test_random_integers_deprecated(self):
                           np.iinfo('l').max, np.iinfo('l').max)
 
     def test_random(self):
-        np.random.seed(self.seed)
-        actual = np.random.random((3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.random((3, 2))
         desired = np.array([[0.61879477158567997, 0.59162362775974664],
                             [0.88868358904449662, 0.89165480011560816],
                             [0.4575674820298663, 0.7781880808593471]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_choice_uniform_replace(self):
-        np.random.seed(self.seed)
-        actual = np.random.choice(4, 4)
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 4)
         desired = np.array([2, 3, 2, 3])
         assert_array_equal(actual, desired)
 
     def test_choice_nonuniform_replace(self):
-        np.random.seed(self.seed)
-        actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
         desired = np.array([1, 1, 2, 2])
         assert_array_equal(actual, desired)
 
     def test_choice_uniform_noreplace(self):
-        np.random.seed(self.seed)
-        actual = np.random.choice(4, 3, replace=False)
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 3, replace=False)
         desired = np.array([0, 1, 3])
         assert_array_equal(actual, desired)
 
     def test_choice_nonuniform_noreplace(self):
-        np.random.seed(self.seed)
-        actual = np.random.choice(4, 3, replace=False,
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 3, replace=False,
                                   p=[0.1, 0.3, 0.5, 0.1])
         desired = np.array([2, 3, 1])
         assert_array_equal(actual, desired)
 
     def test_choice_noninteger(self):
-        np.random.seed(self.seed)
-        actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(['a', 'b', 'c', 'd'], 4)
         desired = np.array(['c', 'd', 'c', 'd'])
         assert_array_equal(actual, desired)
 
@@ -479,8 +480,8 @@ def test_choice_nan_probabilities(self):
         assert_raises(ValueError, np.random.choice, a, p=p)
 
     def test_bytes(self):
-        np.random.seed(self.seed)
-        actual = np.random.bytes(10)
+        rng = random.RandomState(self.seed)
+        actual = rng.bytes(10)
         desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
         assert_equal(actual, desired)
 
@@ -503,9 +504,9 @@ def test_shuffle(self):
                      # gh-4270
                     lambda x: np.asarray([(i, i) for i in x],
                                           [("a", object),
                                            ("b", np.int32)])]:
-            np.random.seed(self.seed)
+            rng = random.RandomState(self.seed)
             alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
-            np.random.shuffle(alist)
+            rng.shuffle(alist)
             actual = alist
             desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
             assert_array_equal(actual, desired)
@@ -565,11 +566,11 @@ def test_shuffle_memoryview(self):
         # gh-18273
         # allow graceful handling of memoryviews
         # (treat the same as arrays)
-        np.random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         a = np.arange(5).data
-        np.random.shuffle(a)
+        rng.shuffle(a)
         assert_equal(np.asarray(a), [0, 1, 4, 3, 2])
-        rng = np.random.RandomState(self.seed)
+        rng = random.RandomState(self.seed)
         rng.shuffle(a)
         assert_equal(np.asarray(a), [0, 1, 2, 3, 4])
         rng = np.random.default_rng(self.seed)
@@ -583,8 +584,8 @@ def test_shuffle_not_writeable(self):
             np.random.shuffle(a)
 
     def test_beta(self):
-        np.random.seed(self.seed)
-        actual = np.random.beta(.1, .9, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.beta(.1, .9, size=(3, 2))
         desired = np.array(
                 [[1.45341850513746058e-02, 5.31297615662868145e-04],
                  [1.85366619058432324e-06, 4.19214516800110563e-03],
@@ -592,25 +593,25 @@ def test_beta(self):
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_binomial(self):
-        np.random.seed(self.seed)
-        actual = np.random.binomial(100, .456, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.binomial(100, .456, size=(3, 2))
         desired = np.array([[37, 43],
                             [42, 48],
                             [46, 45]])
         assert_array_equal(actual, desired)
 
     def test_chisquare(self):
-        np.random.seed(self.seed)
-        actual = np.random.chisquare(50, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.chisquare(50, size=(3, 2))
         desired = np.array([[63.87858175501090585, 68.68407748911370447],
                             [65.77116116901505904, 47.09686762438974483],
                             [72.3828403199695174, 74.18408615260374006]])
         assert_array_almost_equal(actual, desired, decimal=13)
 
     def test_dirichlet(self):
-        np.random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         alpha = np.array([51.72840233779265162, 39.74494232180943953])
-        actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
+        actual = rng.dirichlet(alpha, size=(3, 2))
         desired = np.array([[[0.54539444573611562, 0.45460555426388438],
                              [0.62345816822039413, 0.37654183177960598]],
                             [[0.55206000085785778, 0.44793999914214233],
@@ -643,8 +644,8 @@ def test_dirichlet_bad_alpha(self):
         assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
 
     def test_exponential(self):
-        np.random.seed(self.seed)
-        actual = np.random.exponential(1.1234, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.exponential(1.1234, size=(3, 2))
         desired = np.array([[1.08342649775011624, 1.00607889924557314],
                             [2.46628830085216721, 2.49668106809923884],
                             [0.68717433461363442, 1.69175666993575979]])
@@ -655,16 +656,16 @@ def test_exponential_0(self):
         assert_raises(ValueError, np.random.exponential, scale=-0.)
 
     def test_f(self):
-        np.random.seed(self.seed)
-        actual = np.random.f(12, 77, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.f(12, 77, size=(3, 2))
         desired = np.array([[1.21975394418575878, 1.75135759791559775],
                             [1.44803115017146489, 1.22108959480396262],
                             [1.02176975757740629, 1.34431827623300415]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_gamma(self):
-        np.random.seed(self.seed)
-        actual = np.random.gamma(5, 3, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.gamma(5, 3, size=(3, 2))
         desired = np.array([[24.60509188649287182, 28.54993563207210627],
                             [26.13476110204064184, 12.56988482927716078],
                             [31.71863275789960568, 33.30143302795922011]])
        assert_array_almost_equal(actual, desired, decimal=14)
 
@@ -675,16 +676,16 @@ def test_gamma_0(self):
         assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
 
     def test_geometric(self):
-        np.random.seed(self.seed)
-        actual = np.random.geometric(.123456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.geometric(.123456789, size=(3, 2))
         desired = np.array([[8, 7],
                             [17, 17],
                             [5, 12]])
         assert_array_equal(actual, desired)
 
     def test_gumbel(self):
-        np.random.seed(self.seed)
-        actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
         desired = np.array([[0.19591898743416816, 0.34405539668096674],
                             [-1.4492522252274278, -1.47374816298446865],
                             [1.10651090478803416, -0.69535848626236174]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
@@ -695,34 +696,34 @@ def test_gumbel_0(self):
         assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self): - np.random.seed(self.seed) - actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10, 5, 14, size=(3, 2)) desired = np.array([[10, 10], [10, 10], [9, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 - actual = np.random.hypergeometric(5, 0, 3, size=4) + actual = rng.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(15, 0, 12, size=4) + actual = rng.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 - actual = np.random.hypergeometric(0, 5, 3, size=4) + actual = rng.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(0, 15, 12, size=4) + actual = rng.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): - np.random.seed(self.seed) - actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) @@ -733,16 +734,16 @@ def test_laplace_0(self): assert_raises(ValueError, np.random.laplace, scale=-0.) def test_logistic(self): - np.random.seed(self.seed) - actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): - np.random.seed(self.seed) - actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) @@ -753,16 +754,16 @@ def test_lognormal_0(self): assert_raises(ValueError, np.random.lognormal, sigma=-0.) def test_logseries(self): - np.random.seed(self.seed) - actual = np.random.logseries(p=.923456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) desired = np.array([[2, 2], [6, 17], [3, 6]]) assert_array_equal(actual, desired) def test_multinomial(self): - np.random.seed(self.seed) - actual = np.random.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) desired = np.array([[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]], [[3, 4, 3, 6, 0, 4], @@ -772,11 +773,11 @@ def test_multinomial(self): assert_array_equal(actual, desired) def test_multivariate_normal(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) mean = (.123456789, 10) cov = [[1, 0], [0, 1]] size = (3, 2) - actual = np.random.multivariate_normal(mean, cov, size) + actual = rng.multivariate_normal(mean, cov, size) desired = np.array([[[1.463620246718631, 11.73759122771936], [1.622445133300628, 9.771356667546383]], [[2.154490787682787, 12.170324946056553], @@ -787,7 +788,7 @@ def test_multivariate_normal(self): assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning - actual = np.random.multivariate_normal(mean, cov) + actual = rng.multivariate_normal(mean, cov) desired = np.array([0.895289569463708, 9.17180864067987]) assert_array_almost_equal(actual, desired, decimal=15) @@ -795,54 +796,53 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(np.random.multivariate_normal, mean, cov, + assert_no_warnings(rng.multivariate_normal, mean, cov, check_valid='ignore') # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, np.random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: - np.random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 + with warnings.catch_warnings(): + warnings.simplefilter('error') + rng.multivariate_normal(mean, cov) def test_negative_binomial(self): - np.random.seed(self.seed) - actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) desired = np.array([[848, 841], [892, 611], [779, 647]]) assert_array_equal(actual, desired) def test_noncentral_chisquare(self): - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) assert_array_almost_equal(actual, desired, decimal=14) - actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) desired = np.array([[1.47145377828516666, 0.15052899268012659], [0.00943803056963588, 1.02647251615666169], [0.332334982684171, 0.15451287602753125]]) assert_array_almost_equal(actual, desired, decimal=14) - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) assert_array_almost_equal(actual, desired, decimal=14) def test_noncentral_f(self): - np.random.seed(self.seed) - actual = 
np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], @@ -850,8 +850,8 @@ def test_noncentral_f(self): assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): - np.random.seed(self.seed) - actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, -0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) @@ -862,8 +862,8 @@ def test_normal_0(self): assert_raises(ValueError, np.random.normal, scale=-0.) def test_pareto(self): - np.random.seed(self.seed) - actual = np.random.pareto(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) desired = np.array( [[2.46852460439034849e+03, 1.41286880810518346e+03], [5.28287797029485181e+07, 6.57720981047328785e+07], @@ -877,8 +877,8 @@ def test_pareto(self): np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) def test_poisson(self): - np.random.seed(self.seed) - actual = np.random.poisson(lam=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], [1, 0], [0, 0]]) @@ -893,16 +893,16 @@ def test_poisson_exceptions(self): assert_raises(ValueError, np.random.poisson, [lambig] * 10) def test_power(self): - np.random.seed(self.seed) - actual = np.random.power(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): - np.random.seed(self.seed) - actual = np.random.rayleigh(scale=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) @@ -913,24 +913,24 @@ def test_rayleigh_0(self): assert_raises(ValueError, np.random.rayleigh, scale=-0.) 
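The warning assertions migrate on the same pass: assert_warns and the suppress_warnings recorder give way to pytest.warns where a warning is required, and to warnings.catch_warnings with simplefilter('error') where the test must prove that no warning fires (any warning is escalated to an exception and fails the test). A sketch of both idioms, with a hypothetical noisy()/quiet() pair standing in for the RandomState methods exercised above:

import warnings
import pytest

def noisy():
    # Stand-in for a call that must warn (e.g. a non-PSD covariance).
    warnings.warn("degenerate input", RuntimeWarning)

def quiet():
    # Stand-in for a call that must stay silent.
    return 42

def test_warning_required():
    # Fails if no RuntimeWarning is emitted inside the block.
    with pytest.warns(RuntimeWarning):
        noisy()

def test_no_warning_allowed():
    # 'error' turns any warning raised here into an exception.
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        quiet()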
def test_standard_cauchy(self): - np.random.seed(self.seed) - actual = np.random.standard_cauchy(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_cauchy(size=(3, 2)) desired = np.array([[0.77127660196445336, -6.55601161955910605], [0.93582023391158309, -2.07479293013759447], [-4.74601644297011926, 0.18338989290760804]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): - np.random.seed(self.seed) - actual = np.random.standard_exponential(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_exponential(size=(3, 2)) desired = np.array([[0.96441739162374596, 0.89556604882105506], [2.1953785836319808, 2.22243285392490542], [0.6116915921431676, 1.50592546727413201]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): - np.random.seed(self.seed) - actual = np.random.standard_gamma(shape=3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[5.50841531318455058, 6.62953470301903103], [5.93988484943779227, 2.31044849402133989], [7.54838614231317084, 8.012756093271868]]) @@ -941,24 +941,24 @@ def test_standard_gamma_0(self): assert_raises(ValueError, np.random.standard_gamma, shape=-0.) def test_standard_normal(self): - np.random.seed(self.seed) - actual = np.random.standard_normal(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_normal(size=(3, 2)) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): - np.random.seed(self.seed) - actual = np.random.standard_t(df=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df=10, size=(3, 2)) desired = np.array([[0.97140611862659965, -0.08830486548450577], [1.36311143689505321, -0.55317463909867071], [-0.18473749069684214, 0.61181537341755321]]) assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): - np.random.seed(self.seed) - actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + rng = random.RandomState(self.seed) + actual = rng.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) desired = np.array([[12.68117178949215784, 12.4129206149193152], [16.20131377335158263, 16.25692138747600524], @@ -966,8 +966,8 @@ def test_triangular(self): assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): - np.random.seed(self.seed) - actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.uniform(low=1.23, high=10.54, size=(3, 2)) desired = np.array([[6.99097932346268003, 6.73801597444323974], [9.50364421400426274, 9.53130618907631089], [5.48995325769805476, 8.47493103280052118]]) @@ -1014,8 +1014,8 @@ def __int__(self): assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) def test_vonmises(self): - np.random.seed(self.seed) - actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) desired = np.array([[2.28567572673902042, 2.89163838442285037], [0.38198375564286025, 2.57638023113890746], [1.19153771588353052, 1.83509849681825354]]) @@ -1028,16 +1028,16 @@ def test_vonmises_small(self): np.testing.assert_(np.isfinite(r).all()) def test_wald(self): - np.random.seed(self.seed) - actual = 
np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2)) desired = np.array([[3.82935265715889983, 5.13125249184285526], [0.35045403618358717, 1.50832396872003538], [0.24124319895843183, 0.22031101461955038]]) assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): - np.random.seed(self.seed) - actual = np.random.weibull(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.weibull(a=1.23, size=(3, 2)) desired = np.array([[0.97097342648766727, 0.91422896443565516], [1.89517770034962929, 1.91414357960479564], [0.67057783752390987, 1.39494046635066793]]) @@ -1049,8 +1049,8 @@ def test_weibull_0(self): assert_raises(ValueError, np.random.weibull, a=-0.) def test_zipf(self): - np.random.seed(self.seed) - actual = np.random.zipf(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.zipf(a=1.23, size=(3, 2)) desired = np.array([[66, 29], [1, 1], [3, 13]]) @@ -1060,11 +1060,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 - - def setSeed(self): - np.random.seed(self.seed) + seed = 123456789 # TODO: Include test for randint once it can broadcast # Can steal the test written in PR #6938 @@ -1072,129 +1068,122 @@ def setSeed(self): def test_uniform(self): low = [0] high = [1] - uniform = np.random.uniform desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.setSeed() - actual = uniform(low * 3, high) + rng = random.RandomState(self.seed) + actual = rng.uniform(low * 3, high) assert_array_almost_equal(actual, desired, decimal=14) - self.setSeed() - actual = uniform(low, high * 3) + rng = random.RandomState(self.seed) + actual = rng.uniform(low, high * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): loc = [0] scale = [1] bad_scale = [-1] - normal = np.random.normal desired = np.array([2.2129019979039612, 2.1283977976520019, 1.8417114045748335]) - self.setSeed() - actual = normal(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.normal(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc * 3, bad_scale) + assert_raises(ValueError, rng.normal, loc * 3, bad_scale) - self.setSeed() - actual = normal(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.normal(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) + assert_raises(ValueError, rng.normal, loc, bad_scale * 3) def test_beta(self): a = [1] b = [2] bad_a = [-1] bad_b = [-2] - beta = np.random.beta desired = np.array([0.19843558305989056, 0.075230336409423643, 0.24976865978980844]) - self.setSeed() - actual = beta(a * 3, b) + rng = random.RandomState(self.seed) + actual = rng.beta(a * 3, b) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) + assert_raises(ValueError, rng.beta, bad_a * 3, b) + assert_raises(ValueError, rng.beta, a * 3, bad_b) - self.setSeed() - actual = beta(a, b * 3) + rng = random.RandomState(self.seed) + actual = rng.beta(a, b * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a, b * 3) - assert_raises(ValueError, beta, a, bad_b * 3) + assert_raises(ValueError, 
rng.beta, bad_a, b * 3) + assert_raises(ValueError, rng.beta, a, bad_b * 3) def test_exponential(self): scale = [1] bad_scale = [-1] - exponential = np.random.exponential desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = exponential(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.exponential(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, exponential, bad_scale * 3) + assert_raises(ValueError, rng.exponential, bad_scale * 3) def test_standard_gamma(self): shape = [1] bad_shape = [-1] - std_gamma = np.random.standard_gamma desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = std_gamma(shape * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) + assert_raises(ValueError, rng.standard_gamma, bad_shape * 3) def test_gamma(self): shape = [1] scale = [2] bad_shape = [-1] bad_scale = [-2] - gamma = np.random.gamma desired = np.array([1.5221370731769048, 1.5277256455738331, 1.4248762625178359]) - self.setSeed() - actual = gamma(shape * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, shape * 3, bad_scale) + assert_raises(ValueError, rng.gamma, bad_shape * 3, scale) + assert_raises(ValueError, rng.gamma, shape * 3, bad_scale) - self.setSeed() - actual = gamma(shape, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) + assert_raises(ValueError, rng.gamma, bad_shape, scale * 3) + assert_raises(ValueError, rng.gamma, shape, bad_scale * 3) def test_f(self): dfnum = [1] dfden = [2] bad_dfnum = [-1] bad_dfden = [-2] - f = np.random.f desired = np.array([0.80038951638264799, 0.86768719635363512, 2.7251095168386801]) - self.setSeed() - actual = f(dfnum * 3, dfden) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum * 3, dfden) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) + assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden) + assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden) - self.setSeed() - actual = f(dfnum, dfden * 3) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum, dfden * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) + assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3) + assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3) def test_noncentral_f(self): dfnum = [2] @@ -1203,256 +1192,242 @@ def test_noncentral_f(self): bad_dfnum = [0] bad_dfden = [-1] bad_nonc = [-2] - nonc_f = np.random.noncentral_f desired = np.array([9.1393943263705211, 13.025456344595602, 8.8018098359100545]) - self.setSeed() - actual = nonc_f(dfnum * 3, dfden, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum * 3, dfden, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, 
nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc) - self.setSeed() - actual = nonc_f(dfnum, dfden * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc) - self.setSeed() - actual = nonc_f(dfnum, dfden, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3) def test_noncentral_f_small_df(self): - self.setSeed() + rng = random.RandomState(self.seed) desired = np.array([6.869638627492048, 0.785880199263955]) - actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + actual = rng.noncentral_f(0.9, 0.9, 2, size=2) assert_array_almost_equal(actual, desired, decimal=14) def test_chisquare(self): df = [1] bad_df = [-1] - chisquare = np.random.chisquare desired = np.array([0.57022801133088286, 0.51947702108840776, 0.1320969254923558]) - self.setSeed() - actual = chisquare(df * 3) + rng = random.RandomState(self.seed) + actual = rng.chisquare(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, chisquare, bad_df * 3) + assert_raises(ValueError, rng.chisquare, bad_df * 3) def test_noncentral_chisquare(self): df = [1] nonc = [2] bad_df = [-1] bad_nonc = [-2] - nonc_chi = np.random.noncentral_chisquare desired = np.array([9.0015599467913763, 4.5804135049718742, 6.0872302432834564]) - self.setSeed() - actual = nonc_chi(df * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc) + assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc) - self.setSeed() - actual = nonc_chi(df, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3) def test_standard_t(self): df = [1] bad_df = [-1] - t = np.random.standard_t desired = 
np.array([3.0702872575217643, 5.8560725167361607, 1.0274791436474273]) - self.setSeed() - actual = t(df * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, t, bad_df * 3) + assert_raises(ValueError, rng.standard_t, bad_df * 3) def test_vonmises(self): mu = [2] kappa = [1] bad_kappa = [-1] - vonmises = np.random.vonmises desired = np.array([2.9883443664201312, -2.7064099483995943, -1.8672476700665914]) - self.setSeed() - actual = vonmises(mu * 3, kappa) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu * 3, kappa) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa) - self.setSeed() - actual = vonmises(mu, kappa * 3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu, kappa * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3) def test_pareto(self): a = [1] bad_a = [-1] - pareto = np.random.pareto desired = np.array([1.1405622680198362, 1.1465519762044529, 1.0389564467453547]) - self.setSeed() - actual = pareto(a * 3) + rng = random.RandomState(self.seed) + actual = rng.pareto(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, rng.pareto, bad_a * 3) def test_weibull(self): a = [1] bad_a = [-1] - weibull = np.random.weibull desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = weibull(a * 3) + rng = random.RandomState(self.seed) + actual = rng.weibull(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, rng.weibull, bad_a * 3) def test_power(self): a = [1] bad_a = [-1] - power = np.random.power desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.setSeed() - actual = power(a * 3) + rng = random.RandomState(self.seed) + actual = rng.power(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, rng.power, bad_a * 3) def test_laplace(self): loc = [0] scale = [1] bad_scale = [-1] - laplace = np.random.laplace desired = np.array([0.067921356028507157, 0.070715642226971326, 0.019290950698972624]) - self.setSeed() - actual = laplace(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) + assert_raises(ValueError, rng.laplace, loc * 3, bad_scale) - self.setSeed() - actual = laplace(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] - gumbel = np.random.gumbel desired = np.array([0.2730318639556768, 0.26936705726291116, 0.33906220393037939]) - self.setSeed() - actual = gumbel(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, 
bad_scale) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) - self.setSeed() - actual = gumbel(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) + assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] - logistic = np.random.logistic desired = np.array([0.13152135837586171, 0.13675915696285773, 0.038216792802833396]) - self.setSeed() - actual = logistic(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) - self.setSeed() - actual = logistic(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] - lognormal = np.random.lognormal desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671]) - self.setSeed() - actual = lognormal(mean * 3, sigma) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) - self.setSeed() - actual = lognormal(mean, sigma * 3) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] - rayleigh = np.random.rayleigh desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789]) - self.setSeed() - actual = rayleigh(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] - wald = np.random.wald desired = np.array([0.11873681120271318, 0.12450084820795027, 0.9096122728408238]) - self.setSeed() - actual = wald(mean * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.wald(mean * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) - self.setSeed() - actual = wald(mean, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 
1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) def test_triangular(self): left = [1] @@ -1461,33 +1436,32 @@ def test_triangular(self): bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = right * 2 - triangular = np.random.triangular desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208]) - self.setSeed() - actual = triangular(left * 3, mode, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two, right) - self.setSeed() - actual = triangular(left, mode * 3, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, right) - self.setSeed() - actual = triangular(left, mode, right * 3) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, right * 3) def test_binomial(self): @@ -1496,22 +1470,21 @@ def test_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - binom = np.random.binomial desired = np.array([1, 1, 1]) - self.setSeed() - actual = binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) - self.setSeed() - actual = binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) def test_negative_binomial(self): n = [1] @@ -1519,22 +1492,21 @@ def test_negative_binomial(self): bad_n = [-1] bad_p_one = 
[-1] bad_p_two = [1.5] - neg_binom = np.random.negative_binomial desired = np.array([1, 0, 1]) - self.setSeed() - actual = neg_binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) - self.setSeed() - actual = neg_binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) def test_poisson(self): max_lam = np.random.RandomState()._poisson_lam_max @@ -1542,41 +1514,38 @@ def test_poisson(self): lam = [1] bad_lam_one = [-1] bad_lam_two = [max_lam * 2] - poisson = np.random.poisson desired = np.array([1, 1, 0]) - self.setSeed() - actual = poisson(lam * 3) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] - zipf = np.random.zipf desired = np.array([2, 2, 1]) - self.setSeed() - actual = zipf(a * 3) + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) + assert_raises(ValueError, rng.zipf, bad_a * 3) with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] - geom = np.random.geometric desired = np.array([2, 2, 2]) - self.setSeed() - actual = geom(p * 3) + rng = random.RandomState(self.seed) + actual = rng.geometric(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - assert_raises(ValueError, geom, bad_p_two * 3) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] @@ -1586,52 +1555,49 @@ def test_hypergeometric(self): bad_nbad = [-2] bad_nsample_one = [0] bad_nsample_two = [4] - hypergeom = np.random.hypergeometric desired = np.array([1, 1, 1]) - self.setSeed() - actual = hypergeom(ngood * 3, nbad, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + 
assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) - self.setSeed() - actual = hypergeom(ngood, nbad * 3, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) - self.setSeed() - actual = hypergeom(ngood, nbad, nsample * 3) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] - logseries = np.random.logseries desired = np.array([1, 1, 1]) - self.setSeed() - actual = logseries(p * 3) + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -1673,13 +1639,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (np.random.exponential, np.random.standard_gamma, np.random.chisquare, np.random.standard_t, np.random.pareto, np.random.weibull, @@ -1694,11 +1658,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = 
(np.random.uniform, np.random.normal,
              np.random.beta, np.random.gamma,
              np.random.f, np.random.noncentral_chisquare,
@@ -1714,18 +1679,19 @@ def test_two_arg_funcs(self):
                 argTwo = np.array([0.5])
             else:
-                argTwo = self.argTwo
+                # Re-fetch the default; a plain "argTwo = argTwo" would
+                # keep a stale override from an earlier loop iteration.
+                argTwo = self._create_arrays()[1]

-            out = func(self.argOne, argTwo)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo)
+            assert_equal(out.shape, tgtShape)

-            out = func(self.argOne[0], argTwo)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne[0], argTwo)
+            assert_equal(out.shape, tgtShape)

-            out = func(self.argOne, argTwo[0])
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo[0])
+            assert_equal(out.shape, tgtShape)

     def test_randint(self):
+        _, _, _, tgtShape = self._create_arrays()
         itype = [bool, np.int8, np.uint8, np.int16, np.uint16, np.int32,
                  np.uint32, np.int64, np.uint64]
         func = np.random.randint
@@ -1734,24 +1700,25 @@ def test_randint(self):
         for dt in itype:
             out = func(low, high, dtype=dt)
-            assert_equal(out.shape, self.tgtShape)
+            assert_equal(out.shape, tgtShape)

             out = func(low[0], high, dtype=dt)
-            assert_equal(out.shape, self.tgtShape)
+            assert_equal(out.shape, tgtShape)

             out = func(low, high[0], dtype=dt)
-            assert_equal(out.shape, self.tgtShape)
+            assert_equal(out.shape, tgtShape)

     def test_three_arg_funcs(self):
+        argOne, argTwo, argThree, tgtShape = self._create_arrays()
         funcs = [np.random.noncentral_f, np.random.triangular,
                  np.random.hypergeometric]

         for func in funcs:
-            out = func(self.argOne, self.argTwo, self.argThree)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo, argThree)
+            assert_equal(out.shape, tgtShape)

-            out = func(self.argOne[0], self.argTwo, self.argThree)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne[0], argTwo, argThree)
+            assert_equal(out.shape, tgtShape)

-            out = func(self.argOne, self.argTwo[0], self.argThree)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo[0], argThree)
+            assert_equal(out.shape, tgtShape)
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
index cf4488543c12..63ffb5a86389 100644
--- a/numpy/random/tests/test_randomstate.py
+++ b/numpy/random/tests/test_randomstate.py
@@ -16,8 +16,6 @@
     assert_equal,
     assert_no_warnings,
     assert_raises,
-    assert_warns,
-    suppress_warnings,
 )

 INT_FUNCS = {'binomial': (100.0, 0.6),
@@ -175,10 +173,10 @@ def test_p_non_contiguous(self):
         p = np.arange(15.)
p /= np.sum(p[1::3]) pvals = p[1::3] - random.seed(1432985819) - non_contig = random.multinomial(100, pvals=pvals) - random.seed(1432985819) - contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + rng = random.RandomState(1432985819) + non_contig = rng.multinomial(100, pvals=pvals) + rng = random.RandomState(1432985819) + contig = rng.multinomial(100, pvals=np.ascontiguousarray(pvals)) assert_array_equal(non_contig, contig) def test_multinomial_pvals_float32(self): @@ -193,136 +191,146 @@ def test_multinomial_n_float(self): # Non-index integer types should gracefully truncate floats random.multinomial(100.5, [0.2, 0.8]) + class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.random_state = random.RandomState(self.seed) - self.state = self.random_state.get_state() + def _create_state(self): + seed = 1234567890 + random_state = random.RandomState(seed) + state = random_state.get_state() + return random_state, state def test_basic(self): - old = self.random_state.tomaxint(16) - self.random_state.set_state(self.state) - new = self.random_state.tomaxint(16) + random_state, state = self._create_state() + old = random_state.tomaxint(16) + random_state.set_state(state) + new = random_state.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(self.state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. - - self.random_state.standard_normal() - state = self.random_state.get_state() - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + random_state.standard_normal() + state = random_state.get_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.random_state.standard_normal(size=16) - self.random_state.set_state(old_state) - x2 = self.random_state.standard_normal(size=16) - self.random_state.set_state(self.state) - x3 = self.random_state.standard_normal(size=16) + random_state, state = self._create_state() + old_state = state[:-2] + x1 = random_state.standard_normal(size=16) + random_state.set_state(old_state) + x2 = random_state.standard_normal(size=16) + random_state.set_state(state) + x3 = random_state.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. 
- self.random_state.negative_binomial(0.5, 0.5) + random_state, _ = self._create_state() + random_state.negative_binomial(0.5, 0.5) def test_get_state_warning(self): rs = random.RandomState(PCG64()) - with suppress_warnings() as sup: - w = sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): state = rs.get_state() - assert_(len(w) == 1) - assert isinstance(state, dict) - assert state['bit_generator'] == 'PCG64' + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' def test_invalid_legacy_state_setting(self): - state = self.random_state.get_state() + random_state, state = self._create_state() + state = random_state.get_state() new_state = ('Unknown', ) + state[1:] - assert_raises(ValueError, self.random_state.set_state, new_state) - assert_raises(TypeError, self.random_state.set_state, + assert_raises(ValueError, random_state.set_state, new_state) + assert_raises(TypeError, random_state.set_state, np.array(new_state, dtype=object)) - state = self.random_state.get_state(legacy=False) + state = random_state.get_state(legacy=False) del state['bit_generator'] - assert_raises(ValueError, self.random_state.set_state, state) + assert_raises(ValueError, random_state.set_state, state) def test_pickle(self): - self.random_state.seed(0) - self.random_state.random_sample(100) - self.random_state.standard_normal() - pickled = self.random_state.get_state(legacy=False) + random_state, _ = self._create_state() + random_state.seed(0) + random_state.random_sample(100) + random_state.standard_normal() + pickled = random_state.get_state(legacy=False) assert_equal(pickled['has_gauss'], 1) - rs_unpick = pickle.loads(pickle.dumps(self.random_state)) + rs_unpick = pickle.loads(pickle.dumps(random_state)) unpickled = rs_unpick.get_state(legacy=False) assert_mt19937_state_equal(pickled, unpickled) def test_state_setting(self): - attr_state = self.random_state.__getstate__() - self.random_state.standard_normal() - self.random_state.__setstate__(attr_state) - state = self.random_state.get_state(legacy=False) + random_state, state = self._create_state() + attr_state = random_state.__getstate__() + random_state.standard_normal() + random_state.__setstate__(attr_state) + state = random_state.get_state(legacy=False) assert_mt19937_state_equal(attr_state, state) def test_repr(self): - assert repr(self.random_state).startswith('RandomState(MT19937)') + random_state, _ = self._create_state() + assert repr(random_state).startswith('RandomState(MT19937)') class TestRandint: - rfunc = random.randint - # valid integer/boolean types itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) + rng = np.random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) def test_bounds_checking(self): + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) def 
test_rng_zero_and_extremes(self): + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = (lbnd + ubnd) // 2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) def test_full_range(self): # Test for ticket #1690 + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 try: - self.rfunc(lbnd, ubnd, dtype=dt) + rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " @@ -330,15 +338,15 @@ def test_full_range(self): def test_in_bounds_fuzz(self): # Don't use fixed seed - random.seed() + rng = np.random.RandomState() for dt in self.itype[1:]: for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) assert_(vals.max() < 2) assert_(vals.min() >= 0) @@ -358,20 +366,20 @@ def test_repeatability(self): 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 for dt in self.itype[1:]: - random.seed(1234) + rng = random.RandomState(1234) # view as little endian for hash if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) + val = rng.randint(0, 6, size=1000, dtype=dt) else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() res = hashlib.sha256(val.view(np.int8)).hexdigest() assert_(tgt[np.dtype(dt).name] == res) # bools do not depend on endianness - random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) @@ -394,8 +402,8 @@ def test_repeatability_32bit_boundary_broadcasting(self): [2978368172, 764731833, 2282559898], [ 105711276, 720447391, 3596512484]]]) for size in [None, (5, 3, 3)]: - random.seed(12345) - x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], + rng = random.RandomState(12345) + x = rng.randint([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], size=size) assert_array_equal(x, desired if size is not None else desired[0]) @@ -424,11 +432,13 @@ def test_int64_uint64_corner_case(self): def test_respect_dtype_singleton(self): # See gh-7203 + rng = np.random.RandomState() + for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) for dt in (bool, int): @@ -439,7 +449,7 @@ def test_respect_dtype_singleton(self): lbnd = 0 if dt is bool else np.iinfo(op_dtype).min ubnd = 2 if dt is bool else np.iinfo(op_dtype).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, 
ubnd, dtype=dt) assert_(not hasattr(sample, 'dtype')) assert_equal(type(sample), dt) @@ -447,64 +457,57 @@ def test_respect_dtype_singleton(self): class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_rand(self): - random.seed(self.seed) - actual = random.rand(3, 2) + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rand_singleton(self): - random.seed(self.seed) - actual = random.rand() + rng = random.RandomState(self.seed) + actual = rng.rand() desired = 0.61879477158567997 assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): - random.seed(self.seed) - actual = random.randn(3, 2) + rng = random.RandomState(self.seed) + actual = rng.randn(3, 2) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) - random.seed(self.seed) - actual = random.randn() + rng = random.RandomState(self.seed) + actual = rng.randn() assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_randint(self): - random.seed(self.seed) - actual = random.randint(-99, 99, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers(self): - random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) - random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = random.random_integers(198, size=(3, 2)) - assert_(len(w) == 1) + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(198, size=(3, 2)) assert_array_equal(actual, desired + 100) def test_tomaxint(self): - random.seed(self.seed) rs = random.RandomState(self.seed) actual = rs.tomaxint(size=(3, 2)) if np.iinfo(np.long).max == 2147483647: @@ -529,20 +532,16 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. 
- with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): typer = np.dtype('l').type actual = random.random_integers(typer(np.iinfo('l').max), typer(np.iinfo('l').max)) - assert_(len(w) == 1) assert_equal(actual, desired) def test_random_integers_deprecated(self): @@ -560,44 +559,44 @@ def test_random_integers_deprecated(self): np.iinfo('l').max, np.iinfo('l').max) def test_random_sample(self): - random.seed(self.seed) - actual = random.random_sample((3, 2)) + rng = random.RandomState(self.seed) + actual = rng.random_sample((3, 2)) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) - random.seed(self.seed) - actual = random.random_sample() + rng = random.RandomState(self.seed) + actual = rng.random_sample() assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_choice_uniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) desired = np.array([2, 3, 2, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) desired = np.array([1, 1, 2, 2]) assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, replace=False) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) assert_array_equal(actual, desired) def test_choice_noninteger(self): - random.seed(self.seed) - actual = random.choice(['a', 'b', 'c', 'd'], 4) + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) assert_array_equal(actual, desired) @@ -674,15 +673,15 @@ def test_choice_nan_probabilities(self): def test_choice_p_non_contiguous(self): p = np.ones(10) / 5 p[1::2] = 3.0 - random.seed(self.seed) - non_contig = random.choice(5, 3, p=p[::2]) - random.seed(self.seed) - contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + rng = random.RandomState(self.seed) + non_contig = rng.choice(5, 3, p=p[::2]) + rng = random.RandomState(self.seed) + contig = rng.choice(5, 3, p=np.ascontiguousarray(p[::2])) assert_array_equal(non_contig, contig) def test_bytes(self): - random.seed(self.seed) - actual = random.bytes(10) + rng = random.RandomState(self.seed) + actual = rng.bytes(10) desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' assert_equal(actual, desired) @@ -706,9 +705,9 @@ def test_shuffle(self): lambda x: np.asarray([(i, i) for i in x], [("a", object, (1,)), ("b", np.int32, (1,))])]: - random.seed(self.seed) + rng = random.RandomState(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 
             actual = random.random_integers(np.iinfo('l').max,
                                             np.iinfo('l').max)
-        assert_(len(w) == 1)
         desired = np.iinfo('l').max
         assert_equal(actual, desired)

-        with suppress_warnings() as sup:
-            w = sup.record(DeprecationWarning)
+        with pytest.warns(DeprecationWarning):
             typer = np.dtype('l').type
             actual = random.random_integers(typer(np.iinfo('l').max),
                                             typer(np.iinfo('l').max))
-        assert_(len(w) == 1)
         assert_equal(actual, desired)

     def test_random_integers_deprecated(self):
@@ -560,44 +559,44 @@ def test_random_integers_deprecated(self):
                           np.iinfo('l').max, np.iinfo('l').max)

     def test_random_sample(self):
-        random.seed(self.seed)
-        actual = random.random_sample((3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.random_sample((3, 2))
         desired = np.array([[0.61879477158567997, 0.59162362775974664],
                             [0.88868358904449662, 0.89165480011560816],
                             [0.4575674820298663, 0.7781880808593471]])
         assert_array_almost_equal(actual, desired, decimal=15)

-        random.seed(self.seed)
-        actual = random.random_sample()
+        rng = random.RandomState(self.seed)
+        actual = rng.random_sample()
         assert_array_almost_equal(actual, desired[0, 0], decimal=15)

     def test_choice_uniform_replace(self):
-        random.seed(self.seed)
-        actual = random.choice(4, 4)
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 4)
         desired = np.array([2, 3, 2, 3])
         assert_array_equal(actual, desired)

     def test_choice_nonuniform_replace(self):
-        random.seed(self.seed)
-        actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
         desired = np.array([1, 1, 2, 2])
         assert_array_equal(actual, desired)

     def test_choice_uniform_noreplace(self):
-        random.seed(self.seed)
-        actual = random.choice(4, 3, replace=False)
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 3, replace=False)
         desired = np.array([0, 1, 3])
         assert_array_equal(actual, desired)

     def test_choice_nonuniform_noreplace(self):
-        random.seed(self.seed)
-        actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
         desired = np.array([2, 3, 1])
         assert_array_equal(actual, desired)

     def test_choice_noninteger(self):
-        random.seed(self.seed)
-        actual = random.choice(['a', 'b', 'c', 'd'], 4)
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(['a', 'b', 'c', 'd'], 4)
         desired = np.array(['c', 'd', 'c', 'd'])
         assert_array_equal(actual, desired)

@@ -674,15 +673,15 @@ def test_choice_nan_probabilities(self):
     def test_choice_p_non_contiguous(self):
         p = np.ones(10) / 5
         p[1::2] = 3.0
-        random.seed(self.seed)
-        non_contig = random.choice(5, 3, p=p[::2])
-        random.seed(self.seed)
-        contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
+        rng = random.RandomState(self.seed)
+        non_contig = rng.choice(5, 3, p=p[::2])
+        rng = random.RandomState(self.seed)
+        contig = rng.choice(5, 3, p=np.ascontiguousarray(p[::2]))
         assert_array_equal(non_contig, contig)

     def test_bytes(self):
-        random.seed(self.seed)
-        actual = random.bytes(10)
+        rng = random.RandomState(self.seed)
+        actual = rng.bytes(10)
         desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
         assert_equal(actual, desired)

@@ -706,9 +705,9 @@ def test_shuffle(self):
                      lambda x: np.asarray([(i, i) for i in x],
                                           [("a", object, (1,)),
                                            ("b", np.int32, (1,))])]:
-            random.seed(self.seed)
+            rng = random.RandomState(self.seed)
             alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
-            random.shuffle(alist)
+            rng.shuffle(alist)
             actual = alist
             desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
             assert_array_equal(actual, desired)

@@ -732,35 +731,35 @@ def test_shuffle_invalid_objects(self):
         assert_raises(TypeError, random.shuffle, x)

     def test_permutation(self):
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
-        actual = random.permutation(alist)
+        actual = rng.permutation(alist)
         desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
         assert_array_equal(actual, desired)

-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
-        actual = random.permutation(arr_2d)
+        actual = rng.permutation(arr_2d)
         assert_array_equal(actual, np.atleast_2d(desired).T)

-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         bad_x_str = "abcd"
         assert_raises(IndexError, random.permutation, bad_x_str)

-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         bad_x_float = 1.2
         assert_raises(IndexError, random.permutation, bad_x_float)

         integer_val = 10
         desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]

-        random.seed(self.seed)
-        actual = random.permutation(integer_val)
+        rng = random.RandomState(self.seed)
+        actual = rng.permutation(integer_val)
         assert_array_equal(actual, desired)

     def test_beta(self):
-        random.seed(self.seed)
-        actual = random.beta(.1, .9, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.beta(.1, .9, size=(3, 2))
         desired = np.array(
             [[1.45341850513746058e-02, 5.31297615662868145e-04],
              [1.85366619058432324e-06, 4.19214516800110563e-03],
@@ -768,30 +767,30 @@ def test_beta(self):
         assert_array_almost_equal(actual, desired, decimal=15)

     def test_binomial(self):
-        random.seed(self.seed)
-        actual = random.binomial(100.123, .456, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.binomial(100.123, .456, size=(3, 2))
         desired = np.array([[37, 43],
                             [42, 48],
                             [46, 45]])
         assert_array_equal(actual, desired)

-        random.seed(self.seed)
-        actual = random.binomial(100.123, .456)
+        rng = random.RandomState(self.seed)
+        actual = rng.binomial(100.123, .456)
         desired = 37
         assert_array_equal(actual, desired)

     def test_chisquare(self):
-        random.seed(self.seed)
-        actual = random.chisquare(50, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.chisquare(50, size=(3, 2))
         desired = np.array([[63.87858175501090585, 68.68407748911370447],
                             [65.77116116901505904, 47.09686762438974483],
                             [72.3828403199695174, 74.18408615260374006]])
         assert_array_almost_equal(actual, desired, decimal=13)

     def test_dirichlet(self):
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         alpha = np.array([51.72840233779265162, 39.74494232180943953])
-        actual = random.dirichlet(alpha, size=(3, 2))
+        actual = rng.dirichlet(alpha, size=(3, 2))
         desired = np.array([[[0.54539444573611562, 0.45460555426388438],
                              [0.62345816822039413, 0.37654183177960598]],
                             [[0.55206000085785778, 0.44793999914214233],
@@ -802,9 +801,9 @@ def test_dirichlet(self):
         bad_alpha = np.array([5.4e-01, -1.0e-16])
         assert_raises(ValueError, random.dirichlet, bad_alpha)

-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         alpha = np.array([51.72840233779265162, 39.74494232180943953])
-        actual = random.dirichlet(alpha)
+        actual = rng.dirichlet(alpha)
         assert_array_almost_equal(actual, desired[0, 0], decimal=15)

     def test_dirichlet_size(self):
@@ -827,16 +826,16 @@ def test_dirichlet_bad_alpha(self):
     def test_dirichlet_alpha_non_contiguous(self):
         a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
         alpha = a[::2]
-        random.seed(self.seed)
-        non_contig = random.dirichlet(alpha, size=(3, 2))
-        random.seed(self.seed)
-        contig = random.dirichlet(np.ascontiguousarray(alpha),
+        rng = random.RandomState(self.seed)
+        non_contig = rng.dirichlet(alpha, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        contig = rng.dirichlet(np.ascontiguousarray(alpha),
                                   size=(3, 2))
         assert_array_almost_equal(non_contig, contig)

     def test_exponential(self):
-        random.seed(self.seed)
-        actual = random.exponential(1.1234, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.exponential(1.1234, size=(3, 2))
         desired = np.array([[1.08342649775011624, 1.00607889924557314],
                             [2.46628830085216721, 2.49668106809923884],
                             [0.68717433461363442, 1.69175666993575979]])
@@ -847,16 +846,16 @@ def test_exponential_0(self):
         assert_raises(ValueError, random.exponential, scale=-0.)

     def test_f(self):
-        random.seed(self.seed)
-        actual = random.f(12, 77, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.f(12, 77, size=(3, 2))
         desired = np.array([[1.21975394418575878, 1.75135759791559775],
                             [1.44803115017146489, 1.22108959480396262],
                             [1.02176975757740629, 1.34431827623300415]])
         assert_array_almost_equal(actual, desired, decimal=15)

     def test_gamma(self):
-        random.seed(self.seed)
-        actual = random.gamma(5, 3, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.gamma(5, 3, size=(3, 2))
         desired = np.array([[24.60509188649287182, 28.54993563207210627],
                             [26.13476110204064184, 12.56988482927716078],
                             [31.71863275789960568, 33.30143302795922011]])
@@ -867,8 +866,8 @@ def test_gamma_0(self):
         assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)

     def test_geometric(self):
-        random.seed(self.seed)
-        actual = random.geometric(.123456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.geometric(.123456789, size=(3, 2))
         desired = np.array([[8, 7],
                             [17, 17],
                             [5, 12]])
@@ -879,14 +878,14 @@ def test_geometric_exceptions(self):
         assert_raises(ValueError, random.geometric, [1.1] * 10)
         assert_raises(ValueError, random.geometric, -0.1)
         assert_raises(ValueError, random.geometric, [-0.1] * 10)
-        with suppress_warnings() as sup:
-            sup.record(RuntimeWarning)
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore', RuntimeWarning)
             assert_raises(ValueError, random.geometric, np.nan)
             assert_raises(ValueError, random.geometric, [np.nan] * 10)

     def test_gumbel(self):
-        random.seed(self.seed)
-        actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
         desired = np.array([[0.19591898743416816, 0.34405539668096674],
                             [-1.4492522252274278, -1.47374816298446865],
                             [1.10651090478803416, -0.69535848626236174]])
@@ -897,34 +896,34 @@ def test_gumbel_0(self):
         assert_raises(ValueError, random.gumbel, scale=-0.)

     def test_hypergeometric(self):
-        random.seed(self.seed)
-        actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.hypergeometric(10.1, 5.5, 14, size=(3, 2))
         desired = np.array([[10, 10],
                             [10, 10],
                             [9, 9]])
         assert_array_equal(actual, desired)

         # Test nbad = 0
-        actual = random.hypergeometric(5, 0, 3, size=4)
+        actual = rng.hypergeometric(5, 0, 3, size=4)
         desired = np.array([3, 3, 3, 3])
         assert_array_equal(actual, desired)

-        actual = random.hypergeometric(15, 0, 12, size=4)
+        actual = rng.hypergeometric(15, 0, 12, size=4)
         desired = np.array([12, 12, 12, 12])
         assert_array_equal(actual, desired)

         # Test ngood = 0
-        actual = random.hypergeometric(0, 5, 3, size=4)
+        actual = rng.hypergeometric(0, 5, 3, size=4)
         desired = np.array([0, 0, 0, 0])
         assert_array_equal(actual, desired)

-        actual = random.hypergeometric(0, 15, 12, size=4)
+        actual = rng.hypergeometric(0, 15, 12, size=4)
         desired = np.array([0, 0, 0, 0])
         assert_array_equal(actual, desired)

     def test_laplace(self):
-        random.seed(self.seed)
-        actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2))
         desired = np.array([[0.66599721112760157, 0.52829452552221945],
                             [3.12791959514407125, 3.18202813572992005],
                             [-0.05391065675859356, 1.74901336242837324]])
@@ -935,16 +934,16 @@ def test_laplace_0(self):
         assert_raises(ValueError, random.laplace, scale=-0.)

     def test_logistic(self):
-        random.seed(self.seed)
-        actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2))
         desired = np.array([[1.09232835305011444, 0.8648196662399954],
                             [4.27818590694950185, 4.33897006346929714],
                             [-0.21682183359214885, 2.63373365386060332]])
         assert_array_almost_equal(actual, desired, decimal=15)

     def test_lognormal(self):
-        random.seed(self.seed)
-        actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
         desired = np.array([[16.50698631688883822, 36.54846706092654784],
                             [22.67886599981281748, 0.71617561058995771],
                             [65.72798501792723869, 86.84341601437161273]])
@@ -955,8 +954,8 @@ def test_lognormal_0(self):
         assert_raises(ValueError, random.lognormal, sigma=-0.)

     def test_logseries(self):
-        random.seed(self.seed)
-        actual = random.logseries(p=.923456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.logseries(p=.923456789, size=(3, 2))
         desired = np.array([[2, 2],
                             [6, 17],
                             [3, 6]])
@@ -978,8 +977,8 @@ def test_logseries_exceptions(self, value):
             random.logseries(np.array([value] * 10)[::2])

     def test_multinomial(self):
-        random.seed(self.seed)
-        actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.multinomial(20, [1 / 6.] * 6, size=(3, 2))
         desired = np.array([[[4, 3, 5, 4, 2, 2],
                              [5, 2, 8, 2, 2, 1]],
                             [[3, 4, 3, 6, 0, 4],
@@ -989,11 +988,11 @@ def test_multinomial(self):
         assert_array_equal(actual, desired)

     def test_multivariate_normal(self):
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         mean = (.123456789, 10)
         cov = [[1, 0], [0, 1]]
         size = (3, 2)
-        actual = random.multivariate_normal(mean, cov, size)
+        actual = rng.multivariate_normal(mean, cov, size)
         desired = np.array([[[1.463620246718631, 11.73759122771936],
                              [1.622445133300628, 9.771356667546383]],
                             [[2.154490787682787, 12.170324946056553],
@@ -1004,7 +1003,7 @@ def test_multivariate_normal(self):
         assert_array_almost_equal(actual, desired, decimal=15)

         # Check for default size, was raising deprecation warning
-        actual = random.multivariate_normal(mean, cov)
+        actual = rng.multivariate_normal(mean, cov)
         desired = np.array([0.895289569463708, 9.17180864067987])
         assert_array_almost_equal(actual, desired, decimal=15)

@@ -1012,72 +1011,73 @@ def test_multivariate_normal(self):
         # RuntimeWarning
         mean = [0, 0]
         cov = [[1, 2], [2, 1]]
-        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+        pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov)

         # and that it doesn't warn with RuntimeWarning check_valid='ignore'
-        assert_no_warnings(random.multivariate_normal, mean, cov,
+        assert_no_warnings(rng.multivariate_normal, mean, cov,
                            check_valid='ignore')

         # and that it raises with RuntimeWarning check_valid='raises'
-        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+        assert_raises(ValueError, rng.multivariate_normal, mean, cov,
                       check_valid='raise')

         cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
-        with suppress_warnings() as sup:
-            random.multivariate_normal(mean, cov)
-            w = sup.record(RuntimeWarning)
-            assert len(w) == 0
+        with warnings.catch_warnings():
+            warnings.simplefilter('error', RuntimeWarning)
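+            # with RuntimeWarning promoted to an error, the call below also
+            # asserts that no warning is emitted for the float32 covariance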
+            rng.multivariate_normal(mean, cov)

         mu = np.zeros(2)
         cov = np.eye(2)
-        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+        assert_raises(ValueError, rng.multivariate_normal, mean, cov,
                       check_valid='other')
-        assert_raises(ValueError, random.multivariate_normal,
+        assert_raises(ValueError, rng.multivariate_normal,
                       np.zeros((2, 1, 1)), cov)
-        assert_raises(ValueError, random.multivariate_normal,
+        assert_raises(ValueError, rng.multivariate_normal,
                       mu, np.empty((3, 2)))
-        assert_raises(ValueError, random.multivariate_normal,
+        assert_raises(ValueError, rng.multivariate_normal,
                       mu, np.eye(3))

     def test_negative_binomial(self):
-        random.seed(self.seed)
-        actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2))
         desired = np.array([[848, 841],
                             [892, 611],
                             [779, 647]])
         assert_array_equal(actual, desired)

     def test_negative_binomial_exceptions(self):
-        with suppress_warnings() as sup:
-            sup.record(RuntimeWarning)
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore', RuntimeWarning)
             assert_raises(ValueError, random.negative_binomial, 100, np.nan)
             assert_raises(ValueError, random.negative_binomial, 100,
                           [np.nan] * 10)

     def test_noncentral_chisquare(self):
-        random.seed(self.seed)
-        actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
         desired = np.array([[23.91905354498517511, 13.35324692733826346],
                             [31.22452661329736401, 16.60047399466177254],
                             [5.03461598262724586, 17.94973089023519464]])
         assert_array_almost_equal(actual, desired, decimal=14)

-        actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+        actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
         desired = np.array([[1.47145377828516666, 0.15052899268012659],
                             [0.00943803056963588, 1.02647251615666169],
                             [0.332334982684171, 0.15451287602753125]])
         assert_array_almost_equal(actual, desired, decimal=14)

-        random.seed(self.seed)
-        actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
         desired = np.array([[9.597154162763948, 11.725484450296079],
                             [10.413711048138335, 3.694475922923986],
                             [13.484222138963087, 14.377255424602957]])
         assert_array_almost_equal(actual, desired, decimal=14)

     def test_noncentral_f(self):
-        random.seed(self.seed)
-        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                      size=(3, 2))
         desired = np.array([[1.40598099674926669, 0.34207973179285761],
                             [3.57715069265772545, 7.92632662577829805],
@@ -1090,8 +1088,8 @@ def test_noncentral_f_nan(self):
         assert np.isnan(actual)

     def test_normal(self):
-        random.seed(self.seed)
-        actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2))
         desired = np.array([[2.80378370443726244, 3.59863924443872163],
                             [3.121433477601256, -0.33382987590723379],
                             [4.18552478636557357, 4.46410668111310471]])
@@ -1102,8 +1100,8 @@ def test_normal_0(self):
         assert_raises(ValueError, random.normal, scale=-0.)

     def test_pareto(self):
-        random.seed(self.seed)
-        actual = random.pareto(a=.123456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.pareto(a=.123456789, size=(3, 2))
         desired = np.array(
             [[2.46852460439034849e+03, 1.41286880810518346e+03],
              [5.28287797029485181e+07, 6.57720981047328785e+07],
@@ -1117,8 +1115,8 @@ def test_pareto(self):
             np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)

     def test_poisson(self):
-        random.seed(self.seed)
-        actual = random.poisson(lam=.123456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.poisson(lam=.123456789, size=(3, 2))
         desired = np.array([[0, 0],
                             [1, 0],
                             [0, 0]])
@@ -1131,22 +1129,22 @@ def test_poisson_exceptions(self):
         assert_raises(ValueError, random.poisson, [lamneg] * 10)
         assert_raises(ValueError, random.poisson, lambig)
         assert_raises(ValueError, random.poisson, [lambig] * 10)
-        with suppress_warnings() as sup:
-            sup.record(RuntimeWarning)
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore', RuntimeWarning)
             assert_raises(ValueError, random.poisson, np.nan)
             assert_raises(ValueError, random.poisson, [np.nan] * 10)

     def test_power(self):
-        random.seed(self.seed)
-        actual = random.power(a=.123456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.power(a=.123456789, size=(3, 2))
         desired = np.array([[0.02048932883240791, 0.01424192241128213],
                             [0.38446073748535298, 0.39499689943484395],
                             [0.00177699707563439, 0.13115505880863756]])
         assert_array_almost_equal(actual, desired, decimal=15)

     def test_rayleigh(self):
-        random.seed(self.seed)
-        actual = random.rayleigh(scale=10, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.rayleigh(scale=10, size=(3, 2))
         desired = np.array([[13.8882496494248393, 13.383318339044731],
                             [20.95413364294492098, 21.08285015800712614],
                             [11.06066537006854311, 17.35468505778271009]])
@@ -1157,24 +1155,24 @@ def test_rayleigh_0(self):
         assert_raises(ValueError, random.rayleigh, scale=-0.)

     def test_standard_cauchy(self):
-        random.seed(self.seed)
-        actual = random.standard_cauchy(size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_cauchy(size=(3, 2))
         desired = np.array([[0.77127660196445336, -6.55601161955910605],
                             [0.93582023391158309, -2.07479293013759447],
                             [-4.74601644297011926, 0.18338989290760804]])
         assert_array_almost_equal(actual, desired, decimal=15)

     def test_standard_exponential(self):
-        random.seed(self.seed)
-        actual = random.standard_exponential(size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_exponential(size=(3, 2))
        desired = np.array([[0.96441739162374596, 0.89556604882105506],
                             [2.1953785836319808, 2.22243285392490542],
                             [0.6116915921431676, 1.50592546727413201]])
         assert_array_almost_equal(actual, desired, decimal=15)

     def test_standard_gamma(self):
-        random.seed(self.seed)
-        actual = random.standard_gamma(shape=3, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_gamma(shape=3, size=(3, 2))
         desired = np.array([[5.50841531318455058, 6.62953470301903103],
                             [5.93988484943779227, 2.31044849402133989],
                             [7.54838614231317084, 8.012756093271868]])
@@ -1185,30 +1183,30 @@ def test_standard_gamma_0(self):
         assert_raises(ValueError, random.standard_gamma, shape=-0.)

     def test_standard_normal(self):
-        random.seed(self.seed)
-        actual = random.standard_normal(size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_normal(size=(3, 2))
         desired = np.array([[1.34016345771863121, 1.73759122771936081],
                             [1.498988344300628, -0.2286433324536169],
                             [2.031033998682787, 2.17032494605655257]])
         assert_array_almost_equal(actual, desired, decimal=15)

     def test_randn_singleton(self):
-        random.seed(self.seed)
-        actual = random.randn()
+        rng = random.RandomState(self.seed)
+        actual = rng.randn()
         desired = np.array(1.34016345771863121)
         assert_array_almost_equal(actual, desired, decimal=15)

     def test_standard_t(self):
-        random.seed(self.seed)
-        actual = random.standard_t(df=10, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_t(df=10, size=(3, 2))
         desired = np.array([[0.97140611862659965, -0.08830486548450577],
                             [1.36311143689505321, -0.55317463909867071],
                             [-0.18473749069684214, 0.61181537341755321]])
         assert_array_almost_equal(actual, desired, decimal=15)

     def test_triangular(self):
-        random.seed(self.seed)
-        actual = random.triangular(left=5.12, mode=10.23, right=20.34,
+        rng = random.RandomState(self.seed)
+        actual = rng.triangular(left=5.12, mode=10.23, right=20.34,
                                    size=(3, 2))
         desired = np.array([[12.68117178949215784, 12.4129206149193152],
                             [16.20131377335158263, 16.25692138747600524],
@@ -1216,8 +1214,8 @@ def test_triangular(self):
         assert_array_almost_equal(actual, desired, decimal=14)

     def test_uniform(self):
-        random.seed(self.seed)
-        actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.uniform(low=1.23, high=10.54, size=(3, 2))
         desired = np.array([[6.99097932346268003, 6.73801597444323974],
                             [9.50364421400426274, 9.53130618907631089],
                             [5.48995325769805476, 8.47493103280052118]])
@@ -1262,8 +1260,8 @@ def __int__(self):
         assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)

     def test_vonmises(self):
-        random.seed(self.seed)
-        actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
         desired = np.array([[2.28567572673902042, 2.89163838442285037],
                             [0.38198375564286025, 2.57638023113890746],
                             [1.19153771588353052, 1.83509849681825354]])
@@ -1277,8 +1275,8 @@ def test_vonmises_small(self):
     def test_vonmises_large(self):
         # guard against changes in RandomState when Generator is fixed
-        random.seed(self.seed)
-        actual = random.vonmises(mu=0., kappa=1e7, size=3)
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu=0., kappa=1e7, size=3)
         desired = np.array([4.634253748521111e-04,
                             3.558873596114509e-04,
                             -2.337119622577433e-04])
@@ -1290,16 +1288,16 @@ def test_vonmises_nan(self):
         assert_(np.isnan(r))

     def test_wald(self):
-        random.seed(self.seed)
-        actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2))
         desired = np.array([[3.82935265715889983, 5.13125249184285526],
                             [0.35045403618358717, 1.50832396872003538],
                             [0.24124319895843183, 0.22031101461955038]])
         assert_array_almost_equal(actual, desired, decimal=14)

     def test_weibull(self):
-        random.seed(self.seed)
-        actual = random.weibull(a=1.23, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.weibull(a=1.23, size=(3, 2))
         desired = np.array([[0.97097342648766727, 0.91422896443565516],
                             [1.89517770034962929, 1.91414357960479564],
                             [0.67057783752390987, 1.39494046635066793]])
@@ -1311,8 +1309,8 @@ def test_weibull_0(self):
         assert_raises(ValueError, random.weibull, a=-0.)

     def test_zipf(self):
-        random.seed(self.seed)
-        actual = random.zipf(a=1.23, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.zipf(a=1.23, size=(3, 2))
         desired = np.array([[66, 29],
                             [1, 1],
                             [3, 13]])
@@ -1322,138 +1320,129 @@ def test_zipf(self):
 class TestBroadcast:
     # tests that functions that broadcast behave
     # correctly when presented with non-scalar arguments
-    def setup_method(self):
-        self.seed = 123456789
-
-    def set_seed(self):
-        random.seed(self.seed)
+    seed = 123456789
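+    # each test below seeds its own local RandomState instead of reseeding
+    # the module-level generator, so no state is shared between tests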

     def test_uniform(self):
         low = [0]
         high = [1]
-        uniform = random.uniform
         desired = np.array([0.53283302478975902,
                             0.53413660089041659,
                             0.50955303552646702])

-        self.set_seed()
-        actual = uniform(low * 3, high)
+        rng = random.RandomState(self.seed)
+        actual = rng.uniform(low * 3, high)
         assert_array_almost_equal(actual, desired, decimal=14)

-        self.set_seed()
-        actual = uniform(low, high * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.uniform(low, high * 3)
         assert_array_almost_equal(actual, desired, decimal=14)

     def test_normal(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        normal = random.normal
         desired = np.array([2.2129019979039612,
                             2.1283977976520019,
                             1.8417114045748335])

-        self.set_seed()
-        actual = normal(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.normal(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, normal, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.normal, loc * 3, bad_scale)

-        self.set_seed()
-        actual = normal(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.normal(loc, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, normal, loc, bad_scale * 3)
+        assert_raises(ValueError, rng.normal, loc, bad_scale * 3)

     def test_beta(self):
         a = [1]
         b = [2]
         bad_a = [-1]
         bad_b = [-2]
-        beta = random.beta
         desired = np.array([0.19843558305989056,
                             0.075230336409423643,
                             0.24976865978980844])

-        self.set_seed()
-        actual = beta(a * 3, b)
+        rng = random.RandomState(self.seed)
+        actual = rng.beta(a * 3, b)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, beta, bad_a * 3, b)
-        assert_raises(ValueError, beta, a * 3, bad_b)
+        assert_raises(ValueError, rng.beta, bad_a * 3, b)
+        assert_raises(ValueError, rng.beta, a * 3, bad_b)

-        self.set_seed()
-        actual = beta(a, b * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.beta(a, b * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, beta, bad_a, b * 3)
-        assert_raises(ValueError, beta, a, bad_b * 3)
+        assert_raises(ValueError, rng.beta, bad_a, b * 3)
+        assert_raises(ValueError, rng.beta, a, bad_b * 3)

     def test_exponential(self):
         scale = [1]
         bad_scale = [-1]
-        exponential = random.exponential
         desired = np.array([0.76106853658845242,
                             0.76386282278691653,
                             0.71243813125891797])

-        self.set_seed()
-        actual = exponential(scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.exponential(scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, exponential, bad_scale * 3)
+        assert_raises(ValueError, rng.exponential, bad_scale * 3)

     def test_standard_gamma(self):
         shape = [1]
         bad_shape = [-1]
-        std_gamma = random.standard_gamma
         desired = np.array([0.76106853658845242,
                             0.76386282278691653,
                             0.71243813125891797])

-        self.set_seed()
-        actual = std_gamma(shape * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_gamma(shape * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, std_gamma, bad_shape * 3)
+        assert_raises(ValueError, rng.standard_gamma, bad_shape * 3)

     def test_gamma(self):
         shape = [1]
         scale = [2]
         bad_shape = [-1]
         bad_scale = [-2]
-        gamma = random.gamma
         desired = np.array([1.5221370731769048,
                             1.5277256455738331,
                             1.4248762625178359])

-        self.set_seed()
-        actual = gamma(shape * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.gamma(shape * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gamma, bad_shape * 3, scale)
-        assert_raises(ValueError, gamma, shape * 3, bad_scale)
+        assert_raises(ValueError, rng.gamma, bad_shape * 3, scale)
+        assert_raises(ValueError, rng.gamma, shape * 3, bad_scale)

-        self.set_seed()
-        actual = gamma(shape, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.gamma(shape, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gamma, bad_shape, scale * 3)
-        assert_raises(ValueError, gamma, shape, bad_scale * 3)
+        assert_raises(ValueError, rng.gamma, bad_shape, scale * 3)
+        assert_raises(ValueError, rng.gamma, shape, bad_scale * 3)

     def test_f(self):
         dfnum = [1]
         dfden = [2]
         bad_dfnum = [-1]
         bad_dfden = [-2]
-        f = random.f
         desired = np.array([0.80038951638264799,
                             0.86768719635363512,
                             2.7251095168386801])

-        self.set_seed()
-        actual = f(dfnum * 3, dfden)
+        rng = random.RandomState(self.seed)
+        actual = rng.f(dfnum * 3, dfden)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
-        assert_raises(ValueError, f, dfnum * 3, bad_dfden)
+        assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden)
+        assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden)

-        self.set_seed()
-        actual = f(dfnum, dfden * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.f(dfnum, dfden * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
-        assert_raises(ValueError, f, dfnum, bad_dfden * 3)
+        assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3)
+        assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3)

     def test_noncentral_f(self):
         dfnum = [2]
@@ -1462,267 +1449,253 @@ def test_noncentral_f(self):
         bad_dfnum = [0]
         bad_dfden = [-1]
         bad_nonc = [-2]
-        nonc_f = random.noncentral_f
         desired = np.array([9.1393943263705211,
                             13.025456344595602,
                             8.8018098359100545])

-        self.set_seed()
-        actual = nonc_f(dfnum * 3, dfden, nonc)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum * 3, dfden, nonc)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
+        assert np.all(np.isnan(rng.noncentral_f(dfnum, dfden, [np.nan] * 3)))

-        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
-        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
-        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
+        assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc)

-        self.set_seed()
-        actual = nonc_f(dfnum, dfden * 3, nonc)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum, dfden * 3, nonc)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
-        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
-        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
+        assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc)

-        self.set_seed()
-        actual = nonc_f(dfnum, dfden, nonc * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum, dfden, nonc * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
-        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
-        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
+        assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3)

     def test_noncentral_f_small_df(self):
-        self.set_seed()
+        rng = random.RandomState(self.seed)
         desired = np.array([6.869638627492048, 0.785880199263955])
-        actual = random.noncentral_f(0.9, 0.9, 2, size=2)
+        actual = rng.noncentral_f(0.9, 0.9, 2, size=2)
         assert_array_almost_equal(actual, desired, decimal=14)

     def test_chisquare(self):
         df = [1]
         bad_df = [-1]
-        chisquare = random.chisquare
         desired = np.array([0.57022801133088286,
                             0.51947702108840776,
                             0.1320969254923558])

-        self.set_seed()
-        actual = chisquare(df * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.chisquare(df * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, chisquare, bad_df * 3)
+        assert_raises(ValueError, rng.chisquare, bad_df * 3)

     def test_noncentral_chisquare(self):
         df = [1]
         nonc = [2]
         bad_df = [-1]
         bad_nonc = [-2]
-        nonc_chi = random.noncentral_chisquare
         desired = np.array([9.0015599467913763,
                             4.5804135049718742,
                             6.0872302432834564])

-        self.set_seed()
-        actual = nonc_chi(df * 3, nonc)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df * 3, nonc)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
-        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
+        assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc)
+        assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc)

-        self.set_seed()
-        actual = nonc_chi(df, nonc * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df, nonc * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
-        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
+        assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3)
+        assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3)

     def test_standard_t(self):
         df = [1]
         bad_df = [-1]
-        t = random.standard_t
         desired = np.array([3.0702872575217643,
                             5.8560725167361607,
                             1.0274791436474273])

-        self.set_seed()
-        actual = t(df * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_t(df * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, t, bad_df * 3)
+        assert_raises(ValueError, rng.standard_t, bad_df * 3)
         assert_raises(ValueError, random.standard_t, bad_df * 3)

     def test_vonmises(self):
         mu = [2]
         kappa = [1]
         bad_kappa = [-1]
-        vonmises = random.vonmises
         desired = np.array([2.9883443664201312,
                             -2.7064099483995943,
                             -1.8672476700665914])

-        self.set_seed()
-        actual = vonmises(mu * 3, kappa)
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu * 3, kappa)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
+        assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa)

-        self.set_seed()
-        actual = vonmises(mu, kappa * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu, kappa * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
+        assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3)

     def test_pareto(self):
         a = [1]
         bad_a = [-1]
-        pareto = random.pareto
         desired = np.array([1.1405622680198362,
                             1.1465519762044529,
                             1.0389564467453547])

-        self.set_seed()
-        actual = pareto(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.pareto(a * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, pareto, bad_a * 3)
+        assert_raises(ValueError, rng.pareto, bad_a * 3)
         assert_raises(ValueError, random.pareto, bad_a * 3)

     def test_weibull(self):
         a = [1]
         bad_a = [-1]
-        weibull = random.weibull
         desired = np.array([0.76106853658845242,
                             0.76386282278691653,
                             0.71243813125891797])

-        self.set_seed()
-        actual = weibull(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.weibull(a * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, weibull, bad_a * 3)
+        assert_raises(ValueError, rng.weibull, bad_a * 3)
         assert_raises(ValueError, random.weibull, bad_a * 3)

     def test_power(self):
         a = [1]
         bad_a = [-1]
-        power = random.power
         desired = np.array([0.53283302478975902,
                             0.53413660089041659,
                             0.50955303552646702])

-        self.set_seed()
-        actual = power(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.power(a * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, power, bad_a * 3)
+        assert_raises(ValueError, rng.power, bad_a * 3)
         assert_raises(ValueError, random.power, bad_a * 3)

     def test_laplace(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        laplace = random.laplace
         desired = np.array([0.067921356028507157,
                             0.070715642226971326,
                             0.019290950698972624])

-        self.set_seed()
-        actual = laplace(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.laplace(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, laplace, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.laplace, loc * 3, bad_scale)

-        self.set_seed()
-        actual = laplace(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.laplace(loc, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, laplace, loc, bad_scale * 3)
+        assert_raises(ValueError, rng.laplace, loc, bad_scale * 3)

     def test_gumbel(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        gumbel = random.gumbel
         desired = np.array([0.2730318639556768,
                             0.26936705726291116,
                             0.33906220393037939])

-        self.set_seed()
-        actual = gumbel(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.gumbel(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gumbel, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale)

-        self.set_seed()
-        actual = gumbel(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.gumbel(loc, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gumbel, loc, bad_scale * 3)
+        assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3)

     def test_logistic(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        logistic = random.logistic
         desired = np.array([0.13152135837586171,
                             0.13675915696285773,
                             0.038216792802833396])

-        self.set_seed()
-        actual = logistic(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.logistic(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, logistic, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.logistic, loc * 3, bad_scale)

-        self.set_seed()
-        actual = logistic(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.logistic(loc, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, logistic, loc, bad_scale * 3)
-        assert_equal(random.logistic(1.0, 0.0), 1.0)
+        assert_raises(ValueError, rng.logistic, loc, bad_scale * 3)
+        assert_equal(rng.logistic(1.0, 0.0), 1.0)

     def test_lognormal(self):
         mean = [0]
         sigma = [1]
         bad_sigma = [-1]
-        lognormal = random.lognormal
         desired = np.array([9.1422086044848427,
                             8.4013952870126261,
                             6.3073234116578671])

-        self.set_seed()
-        actual = lognormal(mean * 3, sigma)
+        rng = random.RandomState(self.seed)
+        actual = rng.lognormal(mean * 3, sigma)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+        assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma)
         assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)

-        self.set_seed()
-        actual = lognormal(mean, sigma * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.lognormal(mean, sigma * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
+        assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3)
         assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)

     def test_rayleigh(self):
         scale = [1]
         bad_scale = [-1]
-        rayleigh = random.rayleigh
         desired = np.array([1.2337491937897689,
                             1.2360119924878694,
                             1.1936818095781789])

-        self.set_seed()
-        actual = rayleigh(scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.rayleigh(scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, rayleigh, bad_scale * 3)
+        assert_raises(ValueError, rng.rayleigh, bad_scale * 3)

     def test_wald(self):
         mean = [0.5]
         scale = [1]
         bad_mean = [0]
         bad_scale = [-2]
-        wald = random.wald
         desired = np.array([0.11873681120271318,
                             0.12450084820795027,
                             0.9096122728408238])

-        self.set_seed()
-        actual = wald(mean * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.wald(mean * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, wald, bad_mean * 3, scale)
-        assert_raises(ValueError, wald, mean * 3, bad_scale)
+        assert_raises(ValueError, rng.wald, bad_mean * 3, scale)
+        assert_raises(ValueError, rng.wald, mean * 3, bad_scale)
         assert_raises(ValueError, random.wald, bad_mean * 3, scale)
         assert_raises(ValueError, random.wald, mean * 3, bad_scale)

-        self.set_seed()
-        actual = wald(mean, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.wald(mean, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, wald, bad_mean, scale * 3)
-        assert_raises(ValueError, wald, mean, bad_scale * 3)
-        assert_raises(ValueError, wald, 0.0, 1)
-        assert_raises(ValueError, wald, 0.5, 0.0)
+        assert_raises(ValueError, rng.wald, bad_mean, scale * 3)
+        assert_raises(ValueError, rng.wald, mean, bad_scale * 3)
+        assert_raises(ValueError, rng.wald, 0.0, 1)
+        assert_raises(ValueError, rng.wald, 0.5, 0.0)

     def test_triangular(self):
         left = [1]
@@ -1731,38 +1704,37 @@ def test_triangular(self):
         bad_left_one = [3]
         bad_mode_one = [4]
         bad_left_two, bad_mode_two = right * 2
-        triangular = random.triangular
         desired = np.array([2.03339048710429,
                             2.0347400359389356,
                             2.0095991069536208])

-        self.set_seed()
-        actual = triangular(left * 3, mode, right)
+        rng = random.RandomState(self.seed)
+        actual = rng.triangular(left * 3, mode, right)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
-        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
-        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+        assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right)
+        assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right)
+        assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two,
                       right)

-        self.set_seed()
-        actual = triangular(left, mode * 3, right)
+        rng = random.RandomState(self.seed)
+        actual = rng.triangular(left, mode * 3, right)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
-        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
-        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+        assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right)
+        assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right)
+        assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3,
                       right)

-        self.set_seed()
-        actual = triangular(left, mode, right * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.triangular(left, mode, right * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
-        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
-        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+        assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3)
+        assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3)
+        assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two,
                       right * 3)

-        assert_raises(ValueError, triangular, 10., 0., 20.)
-        assert_raises(ValueError, triangular, 10., 25., 20.)
-        assert_raises(ValueError, triangular, 10., 10., 10.)
+        assert_raises(ValueError, rng.triangular, 10., 0., 20.)
+        assert_raises(ValueError, rng.triangular, 10., 25., 20.)
+        assert_raises(ValueError, rng.triangular, 10., 10., 10.)

     def test_binomial(self):
         n = [1]
@@ -1770,22 +1742,21 @@ def test_binomial(self):
         bad_n = [-1]
         bad_p_one = [-1]
         bad_p_two = [1.5]
-        binom = random.binomial
         desired = np.array([1, 1, 1])

-        self.set_seed()
-        actual = binom(n * 3, p)
+        rng = random.RandomState(self.seed)
+        actual = rng.binomial(n * 3, p)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, binom, bad_n * 3, p)
-        assert_raises(ValueError, binom, n * 3, bad_p_one)
-        assert_raises(ValueError, binom, n * 3, bad_p_two)
+        assert_raises(ValueError, rng.binomial, bad_n * 3, p)
+        assert_raises(ValueError, rng.binomial, n * 3, bad_p_one)
+        assert_raises(ValueError, rng.binomial, n * 3, bad_p_two)

-        self.set_seed()
-        actual = binom(n, p * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.binomial(n, p * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, binom, bad_n, p * 3)
-        assert_raises(ValueError, binom, n, bad_p_one * 3)
-        assert_raises(ValueError, binom, n, bad_p_two * 3)
+        assert_raises(ValueError, rng.binomial, bad_n, p * 3)
+        assert_raises(ValueError, rng.binomial, n, bad_p_one * 3)
+        assert_raises(ValueError, rng.binomial, n, bad_p_two * 3)

     def test_negative_binomial(self):
         n = [1]
@@ -1793,22 +1764,21 @@ def test_negative_binomial(self):
         bad_n = [-1]
         bad_p_one = [-1]
         bad_p_two = [1.5]
-        neg_binom = random.negative_binomial
         desired = np.array([1, 0, 1])

-        self.set_seed()
-        actual = neg_binom(n * 3, p)
+        rng = random.RandomState(self.seed)
+        actual = rng.negative_binomial(n * 3, p)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, neg_binom, bad_n * 3, p)
-        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
-        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
+        assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p)
+        assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one)
+        assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two)

-        self.set_seed()
-        actual = neg_binom(n, p * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.negative_binomial(n, p * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, neg_binom, bad_n, p * 3)
-        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
-        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
+        assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3)
+        assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3)
+        assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3)

     def test_poisson(self):
         max_lam = random.RandomState()._poisson_lam_max
@@ -1816,41 +1786,38 @@ def test_poisson(self):
         lam = [1]
         bad_lam_one = [-1]
         bad_lam_two = [max_lam * 2]
-        poisson = random.poisson
         desired = np.array([1, 1, 0])

-        self.set_seed()
-        actual = poisson(lam * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.poisson(lam * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, poisson, bad_lam_one * 3)
-        assert_raises(ValueError, poisson, bad_lam_two * 3)
+        assert_raises(ValueError, rng.poisson, bad_lam_one * 3)
+        assert_raises(ValueError, rng.poisson, bad_lam_two * 3)

     def test_zipf(self):
         a = [2]
         bad_a = [0]
-        zipf = random.zipf
         desired = np.array([2, 2, 1])

-        self.set_seed()
-        actual = zipf(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.zipf(a * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, zipf, bad_a * 3)
+        assert_raises(ValueError, rng.zipf, bad_a * 3)
         with np.errstate(invalid='ignore'):
-            assert_raises(ValueError, zipf, np.nan)
-            assert_raises(ValueError, zipf, [0, 0, np.nan])
+            assert_raises(ValueError, rng.zipf, np.nan)
+            assert_raises(ValueError, rng.zipf, [0, 0, np.nan])

     def test_geometric(self):
         p = [0.5]
         bad_p_one = [-1]
         bad_p_two = [1.5]
-        geom = random.geometric
         desired = np.array([2, 2, 2])

-        self.set_seed()
-        actual = geom(p * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.geometric(p * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, geom, bad_p_one * 3)
-        assert_raises(ValueError, geom, bad_p_two * 3)
+        assert_raises(ValueError, rng.geometric, bad_p_one * 3)
+        assert_raises(ValueError, rng.geometric, bad_p_two * 3)

     def test_hypergeometric(self):
         ngood = [1]
@@ -1860,57 +1827,54 @@ def test_hypergeometric(self):
         bad_nbad = [-2]
         bad_nsample_one = [0]
         bad_nsample_two = [4]
-        hypergeom = random.hypergeometric
         desired = np.array([1, 1, 1])

-        self.set_seed()
-        actual = hypergeom(ngood * 3, nbad, nsample)
+        rng = random.RandomState(self.seed)
+        actual = rng.hypergeometric(ngood * 3, nbad, nsample)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
-        assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
-        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
-        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
+        assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample)
+        assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample)
+        assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one)
+        assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two)

-        self.set_seed()
-        actual = hypergeom(ngood, nbad * 3, nsample)
+        rng = random.RandomState(self.seed)
+        actual = rng.hypergeometric(ngood, nbad * 3, nsample)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
-        assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
-        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
-        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
+        assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample)
+        assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample)
+        assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one)
+        assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two)

-        self.set_seed()
-        actual = hypergeom(ngood, nbad, nsample * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.hypergeometric(ngood, nbad, nsample * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
-        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
-        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
-        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+        assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3)
+        assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3)
+        assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3)
+        assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3)

-        assert_raises(ValueError, hypergeom, -1, 10, 20)
-        assert_raises(ValueError, hypergeom, 10, -1, 20)
-        assert_raises(ValueError, hypergeom, 10, 10, 0)
-        assert_raises(ValueError, hypergeom, 10, 10, 25)
+        assert_raises(ValueError, rng.hypergeometric, -1, 10, 20)
+        assert_raises(ValueError, rng.hypergeometric, 10, -1, 20)
+        assert_raises(ValueError, rng.hypergeometric, 10, 10, 0)
+        assert_raises(ValueError, rng.hypergeometric, 10, 10, 25)

     def test_logseries(self):
         p = [0.5]
         bad_p_one = [2]
         bad_p_two = [-1]
-        logseries = random.logseries
         desired = np.array([1, 1, 1])

-        self.set_seed()
-        actual = logseries(p * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.logseries(p * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, logseries, bad_p_one * 3)
-        assert_raises(ValueError, logseries, bad_p_two * 3)
+        assert_raises(ValueError, rng.logseries, bad_p_one * 3)
+        assert_raises(ValueError, rng.logseries, bad_p_two * 3)


 @pytest.mark.skipif(IS_WASM, reason="can't start thread")
 class TestThread:
     # make sure each state produces the same sequence even in threads
-    def setup_method(self):
-        self.seeds = range(4)
+    seeds = range(4)

     def check_function(self, function, sz):
         from threading import Thread
@@ -1955,13 +1919,11 @@ def gen_random(state, out):

 # See Issue #4263
 class TestSingleEltArrayInput:
-    def setup_method(self):
-        self.argOne = np.array([2])
-        self.argTwo = np.array([3])
-        self.argThree = np.array([4])
-        self.tgtShape = (1,)
+    def _create_arrays(self):
+        return np.array([2]), np.array([3]), np.array([4]), (1,)

     def test_one_arg_funcs(self):
+        argOne, _, _, tgtShape = self._create_arrays()
         funcs = (random.exponential, random.standard_gamma,
                  random.chisquare, random.standard_t,
                  random.pareto, random.weibull,
@@ -1976,11 +1938,12 @@ def test_one_arg_funcs(self):
                 out = func(np.array([0.5]))

             else:
-                out = func(self.argOne)
+                out = func(argOne)

-            assert_equal(out.shape, self.tgtShape)
+            assert_equal(out.shape, tgtShape)

     def test_two_arg_funcs(self):
+        argOne, argTwo, _, tgtShape = self._create_arrays()
         funcs = (random.uniform, random.normal,
                  random.beta, random.gamma,
                  random.f, random.noncentral_chisquare,
@@ -1996,30 +1959,33 @@ def test_two_arg_funcs(self):
                 argTwo = np.array([0.5])

             else:
-                argTwo = self.argTwo
+                # re-fetch the default so the uniform branch above cannot leak
+                # its rebound argTwo into later iterations
+                _, argTwo, _, _ = self._create_arrays()

-            out = func(self.argOne, argTwo)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo)
+            assert_equal(out.shape, tgtShape)

-            out = func(self.argOne[0], argTwo)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne[0], argTwo)
+            assert_equal(out.shape, tgtShape)

-            out = func(self.argOne, argTwo[0])
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo[0])
+            assert_equal(out.shape, tgtShape)

     def test_three_arg_funcs(self):
+        argOne, argTwo, argThree, tgtShape = self._create_arrays()
         funcs = [random.noncentral_f, random.triangular,
                  random.hypergeometric]

         for func in funcs:
-            out = func(self.argOne, self.argTwo, self.argThree)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo, argThree)
+            assert_equal(out.shape, tgtShape)

-            out = func(self.argOne[0], self.argTwo, self.argThree)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne[0], argTwo, argThree)
+            assert_equal(out.shape, tgtShape)

-            out = func(self.argOne, self.argTwo[0], self.argThree)
-            assert_equal(out.shape, self.tgtShape)
+            out = func(argOne, argTwo[0], argThree)
+            assert_equal(out.shape, tgtShape)


 # Ensure returned array dtype is correct for platform
@@ -2032,9 +1996,11 @@ def test_integer_dtype(int_func):

 def test_integer_repeat(int_func):
-    random.seed(123456789)
+    rng = random.RandomState(123456789)
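+    # a locally seeded RandomState keeps the reference stream reproducible
+    # without mutating the shared numpy.random global state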
     fname, args, sha256 = int_func
-    f = getattr(random, fname)
+    f = getattr(rng, fname)
     val = f(*args, size=1000000)
     if sys.byteorder != 'little':
         val = val.byteswap()
@@ -2070,6 +2034,7 @@ def test_randomstate_ctor_old_style_pickle():
     assert_equal(state_a['gauss'], state_b['gauss'])


+@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
 def test_hot_swap(restore_singleton_bitgen):
     # GH 21808
     def_bg = np.random.default_rng(0)
@@ -2081,6 +2046,7 @@ def test_hot_swap(restore_singleton_bitgen):
     assert bg is second_bg


+@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
 def test_seed_alt_bit_gen(restore_singleton_bitgen):
     # GH 21808
     bg = PCG64(0)
@@ -2095,6 +2061,7 @@ def test_seed_alt_bit_gen(restore_singleton_bitgen):
     assert state["state"]["inc"] != new_state["state"]["inc"]


+@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
 def test_state_error_alt_bit_gen(restore_singleton_bitgen):
     # GH 21808
     state = np.random.get_state()
@@ -2104,6 +2071,7 @@ def test_state_error_alt_bit_gen(restore_singleton_bitgen):
     np.random.set_state(state)


+@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
 def test_swap_worked(restore_singleton_bitgen):
     # GH 21808
     np.random.seed(98765)
@@ -2122,6 +2090,7 @@ def test_swap_worked(restore_singleton_bitgen):
     assert new_state["state"]["inc"] == new_state["state"]["inc"]


+@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
 def test_swapped_singleton_against_direct(restore_singleton_bitgen):
     np.random.set_bit_generator(PCG64(98765))
     singleton_vals = np.random.randint(0, 2 ** 30, 10)
diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py
index 6ccc6180657c..1c8882d1b672 100644
--- a/numpy/random/tests/test_randomstate_regression.py
+++ b/numpy/random/tests/test_randomstate_regression.py
@@ -4,11 +4,7 @@
 import numpy as np
 from numpy import random
-from numpy.testing import (
-    assert_,
-    assert_array_equal,
-    assert_raises,
-)
+from numpy.testing import assert_, assert_array_equal, assert_raises


 class TestRegression:
@@ -58,9 +54,9 @@ def test_shuffle_mixed_dimension(self):
                   [(1, 1), (2, 2), (3, 3), None],
                   [1, (2, 2), (3, 3), None],
                   [(1, 1), 2, 3, None]]:
-            random.seed(12345)
+            rng = random.RandomState(12345)
             shuffled = list(t)
-            random.shuffle(shuffled)
+            rng.shuffle(shuffled)
             expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
             assert_array_equal(np.array(shuffled, dtype=object), expected)

@@ -135,9 +131,9 @@ def test_permutation_subclass(self):
         class N(np.ndarray):
             pass

-        random.seed(1)
+        rng = random.RandomState(1)
         orig = np.arange(3).view(N)
-        perm = random.permutation(orig)
+        perm = rng.permutation(orig)
         assert_array_equal(perm, np.array([0, 2, 1]))
         assert_array_equal(orig, np.arange(3).view(N))

@@ -147,9 +143,9 @@ class M:
             def __array__(self, dtype=None, copy=None):
                 return self.a

-        random.seed(1)
+        rng = random.RandomState(1)
         m = M()
-        perm = random.permutation(m)
+        perm = rng.permutation(m)
         assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
         assert_array_equal(m.__array__(), np.arange(5))

@@ -180,27 +176,27 @@ def test_choice_retun_dtype(self):
                         reason='Cannot test with 32-bit C long')
     def test_randint_117(self):
         # GH 14189
-        random.seed(0)
+        rng = random.RandomState(0)
         expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
                              2588848963, 3684848379, 2340255427, 3638918503,
                              1819583497, 2678185683], dtype='int64')
-        actual = random.randint(2**32, size=10)
+        actual = rng.randint(2**32, size=10)
         assert_array_equal(actual, expected)

     def test_p_zero_stream(self):
         # Regression test for gh-14522. Ensure that future versions
         # generate the same variates as version 1.16.
-        np.random.seed(12345)
-        assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
+        rng = random.RandomState(12345)
+        assert_array_equal(rng.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
                            [0, 0, 0, 1, 1])

     def test_n_zero_stream(self):
         # Regression test for gh-14522. Ensure that future versions
         # generate the same variates as version 1.16.
-        np.random.seed(8675309)
+        rng = random.RandomState(8675309)
         expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                              [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
-        assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
+        assert_array_equal(rng.binomial([[0], [10]], 0.25, size=(2, 10)),
                            expected)
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index 39b7d8c719ac..eeaf6d2b4bd3 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -1,12 +1,11 @@
+import inspect
 import sys

+import pytest
+
 import numpy as np
 from numpy import random
-from numpy.testing import (
-    assert_,
-    assert_array_equal,
-    assert_raises,
-)
+from numpy.testing import IS_PYPY, assert_, assert_array_equal, assert_raises


 class TestRegression:
@@ -56,9 +55,9 @@ def test_shuffle_mixed_dimension(self):
                   [(1, 1), (2, 2), (3, 3), None],
                   [1, (2, 2), (3, 3), None],
                   [(1, 1), 2, 3, None]]:
-            np.random.seed(12345)
+            rng = np.random.RandomState(12345)
             shuffled = list(t)
-            random.shuffle(shuffled)
+            rng.shuffle(shuffled)
             expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
             assert_array_equal(np.array(shuffled, dtype=object), expected)

@@ -133,9 +132,9 @@ def test_permutation_subclass(self):
         class N(np.ndarray):
             pass

-        np.random.seed(1)
+        rng = np.random.RandomState(1)
         orig = np.arange(3).view(N)
-        perm = np.random.permutation(orig)
+        perm = rng.permutation(orig)
         assert_array_equal(perm, np.array([0, 2, 1]))
         assert_array_equal(orig, np.arange(3).view(N))

@@ -145,8 +144,32 @@ class M:
             def __array__(self, dtype=None, copy=None):
                 return self.a

-        np.random.seed(1)
+        rng = np.random.RandomState(1)
         m = M()
-        perm = np.random.permutation(m)
+        perm = rng.permutation(m)
         assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
         assert_array_equal(m.__array__(), np.arange(5))
+
+    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+    @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc")
+    @pytest.mark.parametrize(
+        "cls",
+        [
+            random.Generator,
+            random.MT19937,
+            random.PCG64,
+            random.PCG64DXSM,
+            random.Philox,
+            random.RandomState,
+            random.SFC64,
+            random.BitGenerator,
+            random.SeedSequence,
+            random.bit_generator.SeedlessSeedSequence,
+        ],
+    )
+    def test_inspect_signature(self, cls: type) -> None:
+        assert hasattr(cls, "__text_signature__")
+        try:
+            inspect.signature(cls)
+        except ValueError:
+            pytest.fail(f"invalid signature: {cls.__module__}.{cls.__qualname__}")
diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py
index 6f07443f79a9..5353a72a1174 100644
--- a/numpy/random/tests/test_smoke.py
+++ b/numpy/random/tests/test_smoke.py
@@ -1,4 +1,5 @@
 import pickle
+from dataclasses import dataclass
 from functools import partial

 import pytest
@@ -7,12 +8,10 @@
 from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox
 from numpy.testing import assert_, assert_array_equal, assert_equal

-
-@pytest.fixture(scope='module',
-                params=(np.bool, np.int8, np.int16, np.int32, np.int64,
-                        np.uint8, np.uint16, np.uint32, np.uint64))
-def dtype(request):
-    return request.param
+DTYPES_BOOL_INT_UINT = (np.bool, np.int8, np.int16, np.int32, np.int64,
+                        np.uint8, np.uint16, np.uint32, np.uint64)
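+# a plain module-level tuple replaces the old module-scoped fixture,
+# presumably for use with pytest.mark.parametrize without shared fixture state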
-@pytest.fixture(scope='module', - params=(np.bool, np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64)) -def dtype(request): - return request.param +DTYPES_BOOL_INT_UINT = (np.bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64) def params_0(f): @@ -92,403 +89,459 @@ def warmup(rg, n=None): rg.random(n, dtype=np.float32) +@dataclass +class RNGData: + bit_generator: type[np.random.BitGenerator] + advance: int + seed: list[int] + rg: Generator + seed_vector_bits: int + + class RNG: @classmethod - def setup_class(cls): + def _create_rng(cls): # Overridden in test classes. Place holder to silence IDE noise - cls.bit_generator = PCG64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() - - @classmethod - def _extra_setup(cls): - cls.vec_1d = np.arange(2.0, 102.0) - cls.vec_2d = np.arange(2.0, 102.0)[None, :] - cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) - cls.seed_error = TypeError - - def _reset_state(self): - self.rg.bit_generator.state = self.initial_state + bit_generator = PCG64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_init(self): - rg = Generator(self.bit_generator()) - state = rg.bit_generator.state - rg.standard_normal(1) - rg.standard_normal(1) - rg.bit_generator.state = state - new_state = rg.bit_generator.state + data = self._create_rng() + data.rg = Generator(data.bit_generator()) + state = data.rg.bit_generator.state + data.rg.standard_normal(1) + data.rg.standard_normal(1) + data.rg.bit_generator.state = state + new_state = data.rg.bit_generator.state assert_(comp_state(state, new_state)) def test_advance(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'advance'): - self.rg.bit_generator.advance(self.advance) - assert_(not comp_state(state, self.rg.bit_generator.state)) + data = self._create_rng() + state = data.rg.bit_generator.state + if hasattr(data.rg.bit_generator, 'advance'): + data.rg.bit_generator.advance(data.advance) + assert_(not comp_state(state, data.rg.bit_generator.state)) else: - bitgen_name = self.rg.bit_generator.__class__.__name__ + bitgen_name = data.rg.bit_generator.__class__.__name__ pytest.skip(f'Advance is not supported by {bitgen_name}') def test_jump(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'jumped'): - bit_gen2 = self.rg.bit_generator.jumped() + rg = self._create_rng().rg + state = rg.bit_generator.state + if hasattr(rg.bit_generator, 'jumped'): + bit_gen2 = rg.bit_generator.jumped() jumped_state = bit_gen2.state assert_(not comp_state(state, jumped_state)) - self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) - self.rg.bit_generator.state = state - bit_gen3 = self.rg.bit_generator.jumped() + rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) + rg.bit_generator.state = state + bit_gen3 = rg.bit_generator.jumped() rejumped_state = bit_gen3.state assert_(comp_state(jumped_state, rejumped_state)) else: - bitgen_name = self.rg.bit_generator.__class__.__name__ + bitgen_name = rg.bit_generator.__class__.__name__ if bitgen_name not in ('SFC64',): raise AttributeError(f'no "jumped" in {bitgen_name}') pytest.skip(f'Jump is not supported by {bitgen_name}') def test_uniform(self): - r = self.rg.uniform(-1.0, 0.0, size=10) + rg = self._create_rng().rg + r = 
rg.uniform(-1.0, 0.0, size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) def test_uniform_array(self): - r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10) + rg = self._create_rng().rg + r = rg.uniform(np.array([-1.0] * 10), 0.0, size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) - r = self.rg.uniform(np.array([-1.0] * 10), + r = rg.uniform(np.array([-1.0] * 10), np.array([0.0] * 10), size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) - r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10) + r = rg.uniform(-1.0, np.array([0.0] * 10), size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) def test_random(self): - assert_(len(self.rg.random(10)) == 10) - params_0(self.rg.random) + rg = self._create_rng().rg + assert_(len(rg.random(10)) == 10) + params_0(rg.random) def test_standard_normal_zig(self): - assert_(len(self.rg.standard_normal(10)) == 10) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) def test_standard_normal(self): - assert_(len(self.rg.standard_normal(10)) == 10) - params_0(self.rg.standard_normal) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) + params_0(rg.standard_normal) def test_standard_gamma(self): - assert_(len(self.rg.standard_gamma(10, 10)) == 10) - assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10) - params_1(self.rg.standard_gamma) + rg = self._create_rng().rg + assert_(len(rg.standard_gamma(10, 10)) == 10) + assert_(len(rg.standard_gamma(np.array([10] * 10), 10)) == 10) + params_1(rg.standard_gamma) def test_standard_exponential(self): - assert_(len(self.rg.standard_exponential(10)) == 10) - params_0(self.rg.standard_exponential) + rg = self._create_rng().rg + assert_(len(rg.standard_exponential(10)) == 10) + params_0(rg.standard_exponential) def test_standard_exponential_float(self): - randoms = self.rg.standard_exponential(10, dtype='float32') + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32')) + params_0(partial(rg.standard_exponential, dtype='float32')) def test_standard_exponential_float_log(self): - randoms = self.rg.standard_exponential(10, dtype='float32', + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32', method='inv') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32', + params_0(partial(rg.standard_exponential, dtype='float32', method='inv')) def test_standard_cauchy(self): - assert_(len(self.rg.standard_cauchy(10)) == 10) - params_0(self.rg.standard_cauchy) + rg = self._create_rng().rg + assert_(len(rg.standard_cauchy(10)) == 10) + params_0(rg.standard_cauchy) def test_standard_t(self): - assert_(len(self.rg.standard_t(10, 10)) == 10) - params_1(self.rg.standard_t) + rg = self._create_rng().rg + assert_(len(rg.standard_t(10, 10)) == 10) + params_1(rg.standard_t) def test_binomial(self): - assert_(self.rg.binomial(10, .5) >= 0) - assert_(self.rg.binomial(1000, .5) >= 0) + rg = self._create_rng().rg + assert_(rg.binomial(10, .5) >= 0) + assert_(rg.binomial(1000, .5) >= 0) def test_reset_state(self): - state = self.rg.bit_generator.state - int_1 = self.rg.integers(2**31) - self.rg.bit_generator.state = state - int_2 = self.rg.integers(2**31) + rg = self._create_rng().rg + state = rg.bit_generator.state 
+ int_1 = rg.integers(2**31) + rg.bit_generator.state = state + int_2 = rg.integers(2**31) assert_(int_1 == int_2) def test_entropy_init(self): - rg = Generator(self.bit_generator()) - rg2 = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) + rg2 = Generator(bit_generator()) assert_(not comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_seed(self): - rg = Generator(self.bit_generator(*self.seed)) - rg2 = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) + rg2 = Generator(data.bit_generator(*data.seed)) rg.random() rg2.random() assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_reset_state_gauss(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.standard_normal() state = rg.bit_generator.state n1 = rg.standard_normal(size=10) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.standard_normal(size=10) assert_array_equal(n1, n2) def test_reset_state_uint32(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.integers(0, 2 ** 24, 120, dtype=np.uint32) state = rg.bit_generator.state n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32) assert_array_equal(n1, n2) def test_reset_state_float(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.random(dtype='float32') state = rg.bit_generator.state n1 = rg.random(size=10, dtype='float32') - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.random(size=10, dtype='float32') assert_((n1 == n2).all()) def test_shuffle(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_permutation(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_beta(self): - vals = self.rg.beta(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.beta(2.0, 2.0, 10) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), 2.0) + vals = rg.beta(np.array([2.0] * 10), 2.0) assert_(len(vals) == 10) - vals = self.rg.beta(2.0, np.array([2.0] * 10)) + vals = rg.beta(2.0, np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) assert_(vals.shape == (10, 10)) def test_bytes(self): - vals = self.rg.bytes(10) + rg = self._create_rng().rg + vals = rg.bytes(10) assert_(len(vals) == 10) def test_chisquare(self): - vals = self.rg.chisquare(2.0, 10) + rg = self._create_rng().rg + vals = rg.chisquare(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.chisquare) + params_1(rg.chisquare) def test_exponential(self): 
- vals = self.rg.exponential(2.0, 10) + rg = self._create_rng().rg + vals = rg.exponential(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.exponential) + params_1(rg.exponential) def test_f(self): - vals = self.rg.f(3, 1000, 10) + rg = self._create_rng().rg + vals = rg.f(3, 1000, 10) assert_(len(vals) == 10) def test_gamma(self): - vals = self.rg.gamma(3, 2, 10) + rg = self._create_rng().rg + vals = rg.gamma(3, 2, 10) assert_(len(vals) == 10) def test_geometric(self): - vals = self.rg.geometric(0.5, 10) + rg = self._create_rng().rg + vals = rg.geometric(0.5, 10) assert_(len(vals) == 10) - params_1(self.rg.exponential, bounded=True) + params_1(rg.exponential, bounded=True) def test_gumbel(self): - vals = self.rg.gumbel(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.gumbel(2.0, 2.0, 10) assert_(len(vals) == 10) def test_laplace(self): - vals = self.rg.laplace(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.laplace(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logitic(self): - vals = self.rg.logistic(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.logistic(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logseries(self): - vals = self.rg.logseries(0.5, 10) + rg = self._create_rng().rg + vals = rg.logseries(0.5, 10) assert_(len(vals) == 10) def test_negative_binomial(self): - vals = self.rg.negative_binomial(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.negative_binomial(10, 0.2, 10) assert_(len(vals) == 10) def test_noncentral_chisquare(self): - vals = self.rg.noncentral_chisquare(10, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_chisquare(10, 2, 10) assert_(len(vals) == 10) def test_noncentral_f(self): - vals = self.rg.noncentral_f(3, 1000, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_f(3, 1000, 2, 10) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2) + vals = rg.noncentral_f(np.array([3] * 10), 1000, 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2) + vals = rg.noncentral_f(3, np.array([1000] * 10), 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10)) + vals = rg.noncentral_f(3, 1000, np.array([2] * 10)) assert_(len(vals) == 10) def test_normal(self): - vals = self.rg.normal(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.normal(10, 0.2, 10) assert_(len(vals) == 10) def test_pareto(self): - vals = self.rg.pareto(3.0, 10) + rg = self._create_rng().rg + vals = rg.pareto(3.0, 10) assert_(len(vals) == 10) def test_poisson(self): - vals = self.rg.poisson(10, 10) + rg = self._create_rng().rg + vals = rg.poisson(10, 10) assert_(len(vals) == 10) - vals = self.rg.poisson(np.array([10] * 10)) + vals = rg.poisson(np.array([10] * 10)) assert_(len(vals) == 10) - params_1(self.rg.poisson) + params_1(rg.poisson) def test_power(self): - vals = self.rg.power(0.2, 10) + rg = self._create_rng().rg + vals = rg.power(0.2, 10) assert_(len(vals) == 10) def test_integers(self): - vals = self.rg.integers(10, 20, 10) + rg = self._create_rng().rg + vals = rg.integers(10, 20, 10) assert_(len(vals) == 10) def test_rayleigh(self): - vals = self.rg.rayleigh(0.2, 10) + rg = self._create_rng().rg + vals = rg.rayleigh(0.2, 10) assert_(len(vals) == 10) - params_1(self.rg.rayleigh, bounded=True) + params_1(rg.rayleigh, bounded=True) def test_vonmises(self): - vals = self.rg.vonmises(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.vonmises(10, 0.2, 10) assert_(len(vals) == 10) def test_wald(self): - vals = self.rg.wald(1.0, 1.0, 10) + rg = 
self._create_rng().rg + vals = rg.wald(1.0, 1.0, 10) assert_(len(vals) == 10) def test_weibull(self): - vals = self.rg.weibull(1.0, 10) + rg = self._create_rng().rg + vals = rg.weibull(1.0, 10) assert_(len(vals) == 10) def test_zipf(self): - vals = self.rg.zipf(10, 10) + rg = self._create_rng().rg + vec_1d = np.arange(2.0, 102.0) + vec_2d = np.arange(2.0, 102.0)[None, :] + mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) + vals = rg.zipf(10, 10) assert_(len(vals) == 10) - vals = self.rg.zipf(self.vec_1d) + vals = rg.zipf(vec_1d) assert_(len(vals) == 100) - vals = self.rg.zipf(self.vec_2d) + vals = rg.zipf(vec_2d) assert_(vals.shape == (1, 100)) - vals = self.rg.zipf(self.mat) + vals = rg.zipf(mat) assert_(vals.shape == (100, 100)) def test_hypergeometric(self): - vals = self.rg.hypergeometric(25, 25, 20) + rg = self._create_rng().rg + vals = rg.hypergeometric(25, 25, 20) assert_(np.isscalar(vals)) - vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20) + vals = rg.hypergeometric(np.array([25] * 10), 25, 20) assert_(vals.shape == (10,)) def test_triangular(self): - vals = self.rg.triangular(-5, 0, 5) + rg = self._create_rng().rg + vals = rg.triangular(-5, 0, 5) assert_(np.isscalar(vals)) - vals = self.rg.triangular(-5, np.array([0] * 10), 5) + vals = rg.triangular(-5, np.array([0] * 10), 5) assert_(vals.shape == (10,)) def test_multivariate_normal(self): + rg = self._create_rng().rg mean = [0, 0] cov = [[1, 0], [0, 100]] # diagonal covariance - x = self.rg.multivariate_normal(mean, cov, 5000) + x = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_zig = self.rg.multivariate_normal(mean, cov, 5000) + x_zig = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_inv = self.rg.multivariate_normal(mean, cov, 5000) + x_inv = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) assert_((x_zig != x_inv).any()) def test_multinomial(self): - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3]) + rg = self._create_rng().rg + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3]) assert_(vals.shape == (2,)) - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) assert_(vals.shape == (10, 2)) def test_dirichlet(self): - s = self.rg.dirichlet((10, 5, 3), 20) + rg = self._create_rng().rg + s = rg.dirichlet((10, 5, 3), 20) assert_(s.shape == (20, 3)) def test_pickle(self): - pick = pickle.dumps(self.rg) + rg = self._create_rng().rg + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(self.rg) == type(unpick)) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) - pick = pickle.dumps(self.rg) + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(self.rg) == type(unpick)) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) def test_seed_array(self): - if self.seed_vector_bits is None: - bitgen_name = self.bit_generator.__name__ + data = self._create_rng() + if data.seed_vector_bits is None: + bitgen_name = data.bit_generator.__name__ pytest.skip(f'Vector seeding is not supported by {bitgen_name}') - if self.seed_vector_bits == 32: + if data.seed_vector_bits == 32: dtype = np.uint32 else: dtype = np.uint64 seed = np.array([1], dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = 
self.bit_generator(1) + bg = data.bit_generator(1) state2 = bg.state assert_(comp_state(state1, state2)) seed = np.arange(4, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = np.arange(1500, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = 2 ** np.mod(np.arange(1500, dtype=dtype), - self.seed_vector_bits - 1) + 1 - bg = self.bit_generator(seed) + data.seed_vector_bits - 1) + 1 + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) def test_uniform_float(self): - rg = Generator(self.bit_generator(12345)) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator(12345)) warmup(rg) state = rg.bit_generator.state r1 = rg.random(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.random(11, dtype=np.float32) @@ -497,11 +550,12 @@ def test_uniform_float(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_gamma_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) @@ -510,11 +564,12 @@ def test_gamma_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -523,11 +578,12 @@ def test_normal_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_zig_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -536,7 +592,7 @@ def test_normal_zig_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_output_fill(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -558,7 +614,7 @@ def test_output_fill(self): assert_equal(direct, existing) def test_output_filling_uniform(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -576,7 +632,7 @@ def test_output_filling_uniform(self): assert_equal(direct, existing) def 
test_output_filling_exponential(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -594,7 +650,7 @@ def test_output_filling_exponential(self): assert_equal(direct, existing) def test_output_filling_gamma(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.zeros(size) @@ -612,7 +668,7 @@ def test_output_filling_gamma(self): assert_equal(direct, existing) def test_output_filling_gamma_broadcast(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) mu = np.arange(97.0) + 1.0 @@ -631,7 +687,7 @@ def test_output_filling_gamma_broadcast(self): assert_equal(direct, existing) def test_output_fill_error(self): - rg = self.rg + rg = self._create_rng().rg size = (31, 7, 97) existing = np.empty(size) with pytest.raises(TypeError): @@ -653,7 +709,14 @@ def test_output_fill_error(self): with pytest.raises(ValueError): rg.standard_gamma(1.0, out=existing[::3]) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_broadcast(self, dtype): + rg = self._create_rng().rg + initial_state = rg.bit_generator.state + + def reset_state(rng): + rng.bit_generator.state = initial_state + if dtype == np.bool: upper = 2 lower = 0 @@ -661,45 +724,49 @@ info = np.iinfo(dtype) upper = int(info.max) + 1 lower = info.min - self._reset_state() - a = self.rg.integers(lower, [upper] * 10, dtype=dtype) - self._reset_state() - b = self.rg.integers([lower] * 10, upper, dtype=dtype) + reset_state(rg) + a = rg.integers(lower, [upper] * 10, dtype=dtype) + reset_state(rg) + b = rg.integers([lower] * 10, upper, dtype=dtype) assert_equal(a, b) - self._reset_state() - c = self.rg.integers(lower, upper, size=10, dtype=dtype) + reset_state(rg) + c = rg.integers(lower, upper, size=10, dtype=dtype) assert_equal(a, c) - self._reset_state() - d = self.rg.integers(np.array( + reset_state(rg) + d = rg.integers(np.array( [lower] * 10), np.array([upper], dtype=object), size=10, dtype=dtype) assert_equal(a, d) - self._reset_state() - e = self.rg.integers( + reset_state(rg) + e = rg.integers( np.array([lower] * 10), np.array([upper] * 10), size=10, dtype=dtype) assert_equal(a, e) - self._reset_state() - a = self.rg.integers(0, upper, size=10, dtype=dtype) - self._reset_state() - b = self.rg.integers([upper] * 10, dtype=dtype) + reset_state(rg) + a = rg.integers(0, upper, size=10, dtype=dtype) + reset_state(rg) + b = rg.integers([upper] * 10, dtype=dtype) assert_equal(a, b) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_numpy(self, dtype): + rg = self._create_rng().rg high = np.array([1]) low = np.array([0]) - out = self.rg.integers(low, high, dtype=dtype) + out = rg.integers(low, high, dtype=dtype) assert out.shape == (1,) - out = self.rg.integers(low[0], high, dtype=dtype) + out = rg.integers(low[0], high, dtype=dtype) assert out.shape == (1,) - out = self.rg.integers(low, high[0], dtype=dtype) + out = rg.integers(low, high[0], dtype=dtype) assert out.shape == (1,) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_broadcast_errors(self, dtype): + rg = self._create_rng().rg if dtype == np.bool: upper = 2 lower = 0 @@ -708,102 +776,97 @@ def test_integers_broadcast_errors(self, dtype): upper = int(info.max) + 1 lower = info.min with pytest.raises(ValueError): - self.rg.integers(lower, [upper + 1] * 10, 
dtype=dtype) + rg.integers(lower, [upper + 1] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers(lower - 1, [upper] * 10, dtype=dtype) + rg.integers(lower - 1, [upper] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers([lower - 1], [upper] * 10, dtype=dtype) + rg.integers([lower - 1], [upper] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers([0], [0], dtype=dtype) + rg.integers([0], [0], dtype=dtype) class TestMT19937(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = MT19937 - cls.advance = None - cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 32 - cls._extra_setup() - cls.seed_error = ValueError + def _create_rng(cls): + bit_generator = MT19937 + advance = None + seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 32 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_numpy_state(self): + rg = self._create_rng().rg nprg = np.random.RandomState() nprg.standard_normal(99) state = nprg.get_state() - self.rg.bit_generator.state = state - state2 = self.rg.bit_generator.state + rg.bit_generator.state = state + state2 = rg.bit_generator.state assert_((state[1] == state2['state']['key']).all()) assert_(state[2] == state2['state']['pos']) class TestPhilox(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = Philox - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = Philox + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestSFC64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = SFC64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 192 - cls._extra_setup() + def _create_rng(cls): + bit_generator = SFC64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 192 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64DXSM(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64DXSM - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64DXSM + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class 
TestDefaultRNG(RNG): @classmethod - def setup_class(cls): + def _create_rng(cls): # This will duplicate some tests that directly instantiate a fresh # Generator(), but that's okay. - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = np.random.default_rng(*cls.seed) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = np.random.default_rng(*seed) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_default_is_pcg64(self): # In order to change the default BitGenerator, we'll go through # a deprecation cycle to move to a different function. - assert_(isinstance(self.rg.bit_generator, PCG64)) + rg = self._create_rng().rg + assert_(isinstance(rg.bit_generator, PCG64)) def test_seed(self): np.random.default_rng() diff --git a/numpy/strings/__init__.pyi b/numpy/strings/__init__.pyi index 2f924dca0b5d..b2fb363531d4 100644 --- a/numpy/strings/__init__.pyi +++ b/numpy/strings/__init__.pyi @@ -36,6 +36,7 @@ from numpy._core.strings import ( rjust, rpartition, rstrip, + slice, startswith, str_len, strip, @@ -92,4 +93,5 @@ __all__ = [ "decode", "encode", "translate", + "slice", ] diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index ba3c9a2b7a44..95db8728ef70 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -1,9 +1,12 @@ from unittest import TestCase -from . import overrides -from ._private.utils import ( +from . import _private as _private, overrides +from ._private import extbuild as extbuild +from ._private.utils import ( # type: ignore[deprecated] + BLAS_SUPPORTS_FPE, HAS_LAPACK64, HAS_REFCOUNT, + IS_64BIT, IS_EDITABLE, IS_INSTALLED, IS_MUSL, @@ -51,8 +54,10 @@ from ._private.utils import ( ) __all__ = [ + "BLAS_SUPPORTS_FPE", "HAS_LAPACK64", "HAS_REFCOUNT", + "IS_64BIT", "IS_EDITABLE", "IS_INSTALLED", "IS_MUSL", diff --git a/numpy/testing/_private/extbuild.pyi b/numpy/testing/_private/extbuild.pyi index 609a45e79d16..c1ae507d6a49 100644 --- a/numpy/testing/_private/extbuild.pyi +++ b/numpy/testing/_private/extbuild.pyi @@ -10,7 +10,7 @@ def build_and_import_extension( *, prologue: str = "", build_dir: pathlib.Path | None = None, - include_dirs: Sequence[str] = [], + include_dirs: Sequence[str] | None = None, more_init: str = "", ) -> types.ModuleType: ... @@ -20,6 +20,6 @@ def compile_extension_module( builddir: pathlib.Path, include_dirs: Sequence[str], source_string: str, - libraries: Sequence[str] = [], - library_dirs: Sequence[str] = [], + libraries: Sequence[str] | None = None, + library_dirs: Sequence[str] | None = None, ) -> pathlib.Path: ... 
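# Editor's aside -- an illustrative sketch, not part of the patch. The
# extbuild stub changes above replace mutable defaults such as
# `libraries: Sequence[str] = []` with `Sequence[str] | None = None`. A
# minimal sketch of the pitfall being avoided: a mutable default is
# evaluated once at function definition and then shared by every call.
def leaky(acc=[]):          # one list object shared across all calls
    acc.append("x")
    return acc

def safe(acc=None):         # a fresh list per call
    acc = [] if acc is None else list(acc)
    acc.append("x")
    return acc

assert leaky() == ["x"] and leaky() == ["x", "x"]   # state leaks between calls
assert safe() == ["x"] and safe() == ["x"]          # no leak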
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index d7ceaeab72cc..87d9f0394fb3 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -6,6 +6,7 @@ import contextlib import gc import importlib.metadata +import importlib.util import operator import os import pathlib @@ -25,7 +26,7 @@ import numpy as np import numpy.linalg._umath_linalg -from numpy import isfinite, isinf, isnan +from numpy import isfinite, isnan from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray __all__ = [ @@ -42,6 +43,7 @@ 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', + 'BLAS_SUPPORTS_FPE', ] @@ -89,6 +91,8 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON +BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe(None) + HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 IS_MUSL = False @@ -293,9 +297,10 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): Notes ----- - By default, when one of `actual` and `desired` is a scalar and the other is - an array, the function checks that each element of the array is equal to - the scalar. This behaviour can be disabled by setting ``strict==True``. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -353,7 +358,7 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(repr(k)) assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', @@ -563,6 +568,8 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): Arrays are not almost equal to 9 decimals Mismatched elements: 1 / 2 (50%) + Mismatch at index: + [1]: 2.3333333333333 (ACTUAL), 2.33333334 (DESIRED) Max absolute difference among violations: 6.66669964e-09 Max relative difference among violations: 2.85715698e-09 ACTUAL: array([1. , 2.333333333]) @@ -745,6 +752,24 @@ def istime(x): def isvstring(x): return x.dtype.char == "T" + def robust_any_difference(x, y): + # We include work-arounds here to handle three types of slightly + # pathological ndarray subclasses: + # (1) all() on fully masked arrays returns np.ma.masked, so we use != True + # (np.ma.masked != True evaluates as np.ma.masked, which is falsy). + # (2) __eq__ on some ndarray subclasses returns Python booleans + # instead of element-wise comparisons, so we cast to np.bool() in + # that case (or in case __eq__ returns some other value with no + # all() method). + # (3) subclasses with bare-bones __array_function__ implementations may + # not implement np.all(), so favor using the .all() method + # We are not committed to supporting cases (2) and (3), but it's nice to + # support them if possible. 
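# Editor's aside -- an illustrative sketch, not part of the patch,
# demonstrating pathology (1) from the comment block above: on a fully
# masked array the `.all()` reduction returns `np.ma.masked` rather than a
# bool, and `np.ma.masked != True` is again `np.ma.masked`, which is falsy.
# That is why `robust_any_difference` below uses `result.all() != True`:
# fully masked comparisons are treated as "no difference found".
import numpy as np

x = np.ma.MaskedArray([1.0, 2.0], mask=[True, True])
res = (x == x).all()
assert res is np.ma.masked          # not a plain bool
assert not bool(res != True)        # falsy, i.e. no difference detected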
+ result = x == y + if not hasattr(result, "all") or not callable(result.all): + result = np.bool(result) + return result.all() != True + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): """Handling nan/inf. @@ -756,18 +781,7 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): x_id = func(x) y_id = func(y) - # We include work-arounds here to handle three types of slightly - # pathological ndarray subclasses: - # (1) all() on `masked` array scalars can return masked arrays, so we - # use != True - # (2) __eq__ on some ndarray subclasses returns Python booleans - # instead of element-wise comparisons, so we cast to np.bool() and - # use isinstance(..., bool) checks - # (3) subclasses with bare-bones __array_function__ implementations may - # not implement np.all(), so favor using the .all() method - # We are not committed to supporting such subclasses, but it's nice to - # support them if possible. - if np.bool(x_id == y_id).all() != True: + if robust_any_difference(x_id, y_id): msg = build_err_msg( [x, y], err_msg + '\n%s location mismatch:' @@ -777,6 +791,9 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): raise AssertionError(msg) # If there is a scalar, then here we know the array has the same # flag as it everywhere, so we should return the scalar flag. + # np.ma.masked is also handled and converted to np.False_ (even if the other + # array has nans/infs etc.; that's OK given the handling later of fully-masked + # results). if isinstance(x_id, bool) or x_id.ndim == 0: return np.bool(x_id) elif isinstance(y_id, bool) or y_id.ndim == 0: @@ -784,6 +801,29 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): else: return y_id + def assert_same_inf_values(x, y, infs_mask): + """ + Verify all inf values match in the two arrays + """ + __tracebackhide__ = True # Hide traceback for py.test + + if not infs_mask.any(): + return + if x.ndim > 0 and y.ndim > 0: + x = x[infs_mask] + y = y[infs_mask] + else: + assert infs_mask.all() + + if robust_any_difference(x, y): + msg = build_err_msg( + [x, y], + err_msg + '\ninf values mismatch:', + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + try: if strict: cond = x.shape == y.shape and x.dtype == y.dtype @@ -808,12 +848,15 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') if equal_inf: - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == +inf, - hasval='+inf') - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == -inf, - hasval='-inf') + # If equal_nan=True, skip comparing nans below for equality if they are + # also infs (e.g. inf+nanj) since that would always fail. 
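# Editor's aside -- an illustrative sketch, not part of the patch. The
# comment above concerns complex values such as inf+nanj, which count as
# both "inf" and "nan" under numpy's definitions, so an inf position that
# is already flagged as nan must be excluded from the exact-equality check:
import numpy as np

z = complex(np.inf, np.nan)
assert np.isinf(z) and np.isnan(z)   # both flags are set for inf+nanj
assert z != z                        # equality can never hold at this position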
+ isinf_func = lambda xy: np.logical_and(np.isinf(xy), np.invert(flagged)) + infs_mask = func_assert_same_pos( + x, y, + func=isinf_func, + hasval='inf') + assert_same_inf_values(x, y, infs_mask) + flagged |= infs_mask elif istime(x) and istime(y): # If one is datetime64 and the other timedelta64 there is no point @@ -864,6 +907,31 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): percent_mismatch = 100 * n_mismatch / n_elements remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' f'({percent_mismatch:.3g}%)'] + if invalids.ndim != 0: + if flagged.ndim > 0: + positions = np.argwhere(np.asarray(~flagged))[invalids] + else: + positions = np.argwhere(np.asarray(invalids)) + s = "\n".join( + [ + f" {p.tolist()}: {ox if ox.ndim == 0 else ox[tuple(p)]} " + f"({names[0]}), {oy if oy.ndim == 0 else oy[tuple(p)]} " + f"({names[1]})" + for p in positions[:5] + ] + ) + if len(positions) == 1: + remarks.append( + f"Mismatch at index:\n{s}" + ) + elif len(positions) <= 5: + remarks.append( + f"Mismatch at indices:\n{s}" + ) + else: + remarks.append( + f"First 5 mismatches are at indices:\n{s}" + ) with errstate(all='ignore'): # ignore errors for non-numeric types @@ -980,9 +1048,10 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Notes ----- - When one of `actual` and `desired` is a scalar and the other is array_like, - the function checks that each element of the array_like object is equal to - the scalar. This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -1001,6 +1070,8 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Arrays are not equal Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 3.141592653589793 (ACTUAL), 3.1415926535897927 (DESIRED) Max absolute difference among violations: 4.4408921e-16 Max relative difference among violations: 1.41357986e-16 ACTUAL: array([1. , 3.141593, nan]) @@ -1114,6 +1185,8 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', Arrays are not almost equal to 5 decimals Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 2.33333 (ACTUAL), 2.33339 (DESIRED) Max absolute difference among violations: 6.e-05 Max relative difference among violations: 2.57136612e-05 ACTUAL: array([1. , 2.33333, nan]) @@ -1133,24 +1206,9 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', """ __tracebackhide__ = True # Hide traceback for py.test from numpy._core import number, result_type - from numpy._core.fromnumeric import any as npany from numpy._core.numerictypes import issubdtype def compare(x, y): - try: - if npany(isinf(x)) or npany(isinf(y)): - xinfid = isinf(x) - yinfid = isinf(y) - if not (xinfid == yinfid).all(): - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. dtype = result_type(y, 1.) 
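# Editor's aside -- an illustrative sketch, not part of the patch. The
# surviving comment above ("make sure y is an inexact type to avoid
# abs(MIN_INT)") refers to two's-complement overflow: for fixed-width
# integers, abs(iinfo.min) wraps back to the minimum, so the comparison
# promotes to an inexact dtype first.
import numpy as np

i = np.int8(np.iinfo(np.int8).min)           # -128
with np.errstate(over="ignore"):
    assert np.abs(i) == np.int8(-128)        # wraps instead of giving 128
f = i.astype(np.result_type(np.int8, 1.0))   # float64, as in compare() above
assert np.abs(f) == 128.0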
@@ -1235,6 +1293,8 @@ def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False): Arrays are not strictly ordered `x < y` Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [0]: 1.0 (x), 1.0 (y) Max absolute difference among violations: 0. Max relative difference among violations: 0. x: array([ 1., 1., nan]) @@ -1382,12 +1442,13 @@ def rundocs(filename=None, raise_on_error=True): """ import doctest - from numpy.distutils.misc_util import exec_mod_from_location if filename is None: f = sys._getframe(1) filename = f.f_globals['__file__'] name = os.path.splitext(os.path.basename(filename))[0] - m = exec_mod_from_location(name, filename) + spec = importlib.util.spec_from_file_location(name, filename) + m = importlib.util.module_from_spec(spec) + spec.loader.exec_module(m) tests = doctest.DocTestFinder().find(m) runner = doctest.DocTestRunner(verbose=False) @@ -1613,9 +1674,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. - The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note - that ``allclose`` has different default values). It compares the difference - between `actual` and `desired` to ``atol + rtol * abs(desired)``. + The test is equivalent to ``allclose(actual, desired, rtol, atol)``, + except that it is stricter: it doesn't broadcast its operands, and has + tighter default tolerance values. It compares the difference between + `actual` and `desired` to ``atol + rtol * abs(desired)``. Parameters ---------- @@ -1651,10 +1713,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, Notes ----- - When one of `actual` and `desired` is a scalar and the other is - array_like, the function performs the comparison as if the scalar were - broadcasted to the shape of the array. - This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function performs the comparison as if the scalar were broadcasted to the shape + of the array. Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -1918,7 +1980,7 @@ def integer_repr(x): @contextlib.contextmanager def _assert_warns_context(warning_class, name=None): __tracebackhide__ = True # Hide traceback for py.test - with suppress_warnings() as sup: + with suppress_warnings(_warn=False) as sup: l = sup.record(warning_class) yield if not len(l) > 0: @@ -1942,6 +2004,11 @@ def assert_warns(warning_class, *args, **kwargs): The ability to be used as a context manager is new in NumPy v1.11.0. + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.catch_warnings` or + ``pytest.warns`` instead. + Parameters ---------- warning_class : class @@ -1969,6 +2036,11 @@ def assert_warns(warning_class, *args, **kwargs): >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) >>> assert ret == 16 """ + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) if not args and not kwargs: return _assert_warns_context(warning_class) elif len(args) < 1: @@ -2221,6 +2293,11 @@ class suppress_warnings: tests might need to see the warning. 
Additionally it allows easier specificity for testing warnings and can be nested. + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.filterwarnings` or + ``pytest.filterwarnings`` instead. + Parameters ---------- forwarding_rule : str, optional @@ -2281,7 +2358,13 @@ def some_function(): # do something which causes a warning in np.ma.core pass """ - def __init__(self, forwarding_rule="always"): + def __init__(self, forwarding_rule="always", _warn=True): + if _warn: + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) self._entered = False # Suppressions are either instance or defined inside one with block: @@ -2747,3 +2830,30 @@ def run_threaded(func, max_workers=8, pass_count=False, barrier.abort() for f in futures: f.result() + + +def requires_deep_recursion(func): + """Decorator to skip test if deep recursion is not supported.""" + import pytest + + @wraps(func) + def wrapper(*args, **kwargs): + if IS_PYSTON: + pytest.skip("Pyston disables recursion checking") + if IS_WASM: + pytest.skip("WASM has limited stack size") + cflags = sysconfig.get_config_var('CFLAGS') or '' + config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' + address_sanitizer = ( + '-fsanitize=address' in cflags or + '--with-address-sanitizer' in config_args + ) + thread_sanitizer = ( + '-fsanitize=thread' in cflags or + '--with-thread-sanitizer' in config_args + ) + if address_sanitizer or thread_sanitizer: + pytest.skip("AddressSanitizer and ThreadSanitizer do not support " + "deep recursion") + return func(*args, **kwargs) + return wrapper diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 4e3b60a0ef70..5cb7f746380d 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -3,6 +3,7 @@ import sys import types import unittest import warnings +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Sequence from contextlib import _GeneratorContextManager from pathlib import Path @@ -13,21 +14,16 @@ from typing import ( ClassVar, Final, Generic, + Literal as L, NoReturn, - ParamSpec, Self, SupportsIndex, - TypeAlias, - TypeVarTuple, overload, type_check_only, ) -from typing import Literal as L +from typing_extensions import TypeVar, deprecated from unittest.case import SkipTest -from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath -from typing_extensions import TypeVar - import numpy as np from numpy._typing import ( ArrayLike, @@ -45,9 +41,13 @@ __all__ = [ # noqa: RUF022 "IS_PYPY", "IS_PYSTON", "IS_WASM", + "IS_INSTALLED", + "IS_64BIT", "HAS_LAPACK64", "HAS_REFCOUNT", + "BLAS_SUPPORTS_FPE", "NOGIL_BUILD", + "NUMPY_ROOT", "assert_", "assert_array_almost_equal_nulp", "assert_raises_regex", @@ -88,26 +88,20 @@ __all__ = [ # noqa: RUF022 ### -_T = TypeVar("_T") -_Ts = TypeVarTuple("_Ts") -_Tss = ParamSpec("_Tss") -_ET = TypeVar("_ET", bound=BaseException, default=BaseException) -_FT = TypeVar("_FT", bound=Callable[..., Any]) _W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) -_T_or_bool = TypeVar("_T_or_bool", default=bool) -_StrLike: TypeAlias = str | bytes -_RegexLike: TypeAlias = _StrLike | Pattern[Any] -_NumericArrayLike: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeObject_co +type _StrLike = str | bytes 
+type _RegexLike = _StrLike | Pattern[Any] +type _NumericArrayLike = _ArrayLikeNumber_co | _ArrayLikeObject_co -_ExceptionSpec: TypeAlias = type[_ET] | tuple[type[_ET], ...] -_WarningSpec: TypeAlias = type[Warning] -_WarnLog: TypeAlias = list[warnings.WarningMessage] -_ToModules: TypeAlias = Iterable[types.ModuleType] +type _ExceptionSpec[ExceptionT: BaseException] = type[ExceptionT] | tuple[type[ExceptionT], ...] +type _WarningSpec = type[Warning] +type _WarnLog = list[warnings.WarningMessage] +type _ToModules = Iterable[types.ModuleType] # Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` -_ComparisonFunc: TypeAlias = Callable[ - [NDArray[Any], NDArray[Any]], +type _ComparisonFunc = Callable[ + [np.ndarray, np.ndarray], bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], ] @@ -131,15 +125,16 @@ IS_MUSL: Final[bool] = ... IS_PYPY: Final[bool] = ... IS_PYSTON: Final[bool] = ... IS_WASM: Final[bool] = ... +IS_64BIT: Final[bool] = ... HAS_REFCOUNT: Final[bool] = ... HAS_LAPACK64: Final[bool] = ... +BLAS_SUPPORTS_FPE: Final[bool] = ... NOGIL_BUILD: Final[bool] = ... class KnownFailureException(Exception): ... class IgnoreException(Exception): ... -# NOTE: `warnings.catch_warnings` is incorrectly defined as invariant in typeshed -class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): # type: ignore[type-var] # pyright: ignore[reportInvalidTypeArguments] +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): class_modules: ClassVar[tuple[types.ModuleType, ...]] = () modules: Final[set[types.ModuleType]] @overload # record: True @@ -149,12 +144,13 @@ class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): @overload # record; bool def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... +@deprecated("Please use warnings.filterwarnings or pytest.mark.filterwarnings instead") class suppress_warnings: log: Final[_WarnLog] def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... def __enter__(self) -> Self: ... def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... - def __call__(self, /, func: _FT) -> _FT: ... + def __call__[FuncT: Callable[..., Any]](self, /, func: FuncT) -> FuncT: ... # def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... @@ -163,14 +159,14 @@ class suppress_warnings: # Contrary to runtime we can't do `os.name` checks while type checking, # only `sys.platform` checks if sys.platform == "win32" or sys.platform == "cygwin": - def memusage(processName: str = ..., instance: int = ...) -> int: ... + def memusage(processName: str = "python", instance: int = 0) -> int: ... elif sys.platform == "linux": - def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> int | None: ... + def memusage(_proc_pid_stat: StrOrBytesPath | None = None) -> int | None: ... else: def memusage() -> NoReturn: ... if sys.platform == "linux": - def jiffies(_proc_pid_stat: StrOrBytesPath = ..., _load_time: list[float] = []) -> int: ... + def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] | None = None) -> int: ... else: def jiffies(_load_time: list[float] = []) -> int: ... 
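# Editor's aside -- an illustrative sketch, not part of the patch. The
# deprecation notes above point users at stdlib/pytest equivalents of
# assert_warns and suppress_warnings; a minimal migration sketch, where
# `old_func` is a hypothetical function that emits a warning:
import warnings

import pytest

def old_func(x):
    warnings.warn("use new_func instead", DeprecationWarning, stacklevel=2)
    return 2 * x

# instead of: np.testing.assert_warns(DeprecationWarning, old_func, 4)
with pytest.warns(DeprecationWarning):
    assert old_func(4) == 8

# stdlib-only equivalent using warnings.catch_warnings:
with warnings.catch_warnings(record=True) as log:
    warnings.simplefilter("always")
    old_func(4)
assert any(issubclass(w.category, DeprecationWarning) for w in log)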
@@ -178,10 +174,10 @@ else: def build_err_msg( arrays: Iterable[object], err_msg: object, - header: str = ..., - verbose: bool = ..., - names: Sequence[str] = ..., - precision: SupportsIndex | None = ..., + header: str = "Items are not equal:", + verbose: bool = True, + names: Sequence[str] = ("ACTUAL", "DESIRED"), # = ('ACTUAL', 'DESIRED') + precision: SupportsIndex | None = 8, ) -> str: ... # @@ -285,36 +281,36 @@ def assert_string_equal(actual: str, desired: str) -> None: ... # @overload -def assert_raises( - exception_class: _ExceptionSpec[_ET], +def assert_raises[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], /, *, msg: str | None = None, -) -> unittest.case._AssertRaisesContext[_ET]: ... +) -> unittest.case._AssertRaisesContext[ExceptionT]: ... @overload -def assert_raises( - exception_class: _ExceptionSpec, - callable: Callable[_Tss, Any], +def assert_raises[**Tss]( + exception_class: _ExceptionSpec[BaseException], + callable: Callable[Tss, Any], /, - *args: _Tss.args, - **kwargs: _Tss.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... # @overload -def assert_raises_regex( - exception_class: _ExceptionSpec[_ET], +def assert_raises_regex[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], expected_regexp: _RegexLike, *, msg: str | None = None, -) -> unittest.case._AssertRaisesContext[_ET]: ... +) -> unittest.case._AssertRaisesContext[ExceptionT]: ... @overload -def assert_raises_regex( - exception_class: _ExceptionSpec, +def assert_raises_regex[**Tss]( + exception_class: _ExceptionSpec[BaseException], expected_regexp: _RegexLike, - callable: Callable[_Tss, Any], - *args: _Tss.args, - **kwargs: _Tss.kwargs, + callable: Callable[Tss, Any], + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... # @@ -360,21 +356,28 @@ def assert_array_max_ulp( # @overload +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload -def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") +def assert_warns[**Tss, ReturnT]( + warning_class: _WarningSpec, + func: Callable[Tss, ReturnT], + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> ReturnT: ... # @overload def assert_no_warnings() -> _GeneratorContextManager[None]: ... @overload -def assert_no_warnings(func: Callable[_Tss, _T], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +def assert_no_warnings[**Tss, ReturnT](func: Callable[Tss, ReturnT], /, *args: Tss.args, **kwargs: Tss.kwargs) -> ReturnT: ... # @overload def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... @overload -def assert_no_gc_cycles(func: Callable[_Tss, Any], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> None: ... +def assert_no_gc_cycles[**Tss](func: Callable[Tss, Any], /, *args: Tss.args, **kwargs: Tss.kwargs) -> None: ... ### @@ -453,7 +456,7 @@ def temppath( ) -> _GeneratorContextManager[AnyStr]: ... # -def check_support_sve(__cache: list[_T_or_bool] = []) -> _T_or_bool: ... # noqa: PYI063 +def check_support_sve(__cache: list[bool] = ..., /) -> bool: ... # stubdefaulter: ignore[missing-default] # def decorate_methods( @@ -473,23 +476,23 @@ def run_threaded( prepare_args: None = None, ) -> None: ... 
 @overload
-def run_threaded(
-    func: Callable[[*_Ts], None],
+def run_threaded[*Ts](
+    func: Callable[[*Ts], None],
     max_workers: int,
     pass_count: bool,
     pass_barrier: bool,
     outer_iterations: int,
-    prepare_args: tuple[*_Ts],
+    prepare_args: tuple[*Ts],
 ) -> None: ...
 @overload
-def run_threaded(
-    func: Callable[[*_Ts], None],
+def run_threaded[*Ts](
+    func: Callable[[*Ts], None],
     max_workers: int = 8,
     pass_count: bool = False,
     pass_barrier: bool = False,
     outer_iterations: int = 1,
     *,
-    prepare_args: tuple[*_Ts],
+    prepare_args: tuple[*Ts],
 ) -> None: ...

 #
diff --git a/numpy/testing/overrides.pyi b/numpy/testing/overrides.pyi
index 3fefc3f350da..916154c155b1 100644
--- a/numpy/testing/overrides.pyi
+++ b/numpy/testing/overrides.pyi
@@ -1,6 +1,5 @@
 from collections.abc import Callable, Hashable
 from typing import Any
-
 from typing_extensions import TypeIs

 import numpy as np
diff --git a/numpy/testing/print_coercion_tables.pyi b/numpy/testing/print_coercion_tables.pyi
index c859305f2350..f463a18c05e4 100644
--- a/numpy/testing/print_coercion_tables.pyi
+++ b/numpy/testing/print_coercion_tables.pyi
@@ -1,6 +1,5 @@
 from collections.abc import Iterable
 from typing import ClassVar, Generic, Self
-
 from typing_extensions import TypeVar

 import numpy as np
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index fcf20091ca8e..6d43343ef98a 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -36,6 +36,9 @@ class _GenericTest:

+    def _assert_func(self, *args, **kwargs):
+        pass
+
     def _test_equal(self, a, b):
         self._assert_func(a, b)

@@ -82,8 +85,8 @@ def test_array_likes(self):

 class TestArrayEqual(_GenericTest):

-    def setup_method(self):
-        self._assert_func = assert_array_equal
+    def _assert_func(self, *args, **kwargs):
+        assert_array_equal(*args, **kwargs)

     def test_generic_rank1(self):
         """Test rank 1 array for all dtypes."""
@@ -197,6 +200,40 @@ def test_masked_nan_inf(self):
         self._test_equal(a, b)
         self._test_equal(b, a)

+    # Also provides test cases for gh-11121
+    def test_masked_scalar(self):
+        # Test masked scalar vs. plain/masked scalar
+        for a_val, b_val, b_masked in itertools.product(
+            [3., np.nan, np.inf],
+            [3., 4., np.nan, np.inf, -np.inf],
+            [False, True],
+        ):
+            a = np.ma.MaskedArray(a_val, mask=True)
+            b = np.ma.MaskedArray(b_val, mask=True) if b_masked else np.array(b_val)
+            self._test_equal(a, b)
+            self._test_equal(b, a)
+
+        # Test masked scalar vs. plain array
+        for a_val, b_val in itertools.product(
+            [3., np.nan, -np.inf],
+            itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2),
+        ):
+            a = np.ma.MaskedArray(a_val, mask=True)
+            b = np.array(b_val)
+            self._test_equal(a, b)
+            self._test_equal(b, a)
+
+        # Test masked scalar vs. masked array
+        for a_val, b_val, b_mask in itertools.product(
+            [3., np.nan, np.inf],
+            itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2),
+            itertools.product([False, True], repeat=2),
+        ):
+            a = np.ma.MaskedArray(a_val, mask=True)
+            b = np.ma.MaskedArray(b_val, mask=b_mask)
+            self._test_equal(a, b)
+            self._test_equal(b, a)
+
     def test_subclass_that_overrides_eq(self):
         # While we cannot guarantee testing functions will always work for
         # subclasses, the tests should ideally rely only on subclasses having
@@ -265,6 +302,8 @@ def test_array_vs_array_not_equal(self):
         b = np.array([34986, 545676, 439655, 0])
         expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
+                        'Mismatch at index:\n'
+                        ' [3]: 563766 (ACTUAL), 0 (DESIRED)\n'
                         'Max absolute difference among violations: 563766\n'
                         'Max relative difference among violations: inf')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -272,6 +311,9 @@ def test_array_vs_array_not_equal(self):

         a = np.array([34986, 545676, 439655.2, 563766])
         expected_msg = ('Mismatched elements: 2 / 4 (50%)\n'
+                        'Mismatch at indices:\n'
+                        ' [2]: 439655.2 (ACTUAL), 439655 (DESIRED)\n'
+                        ' [3]: 563766.0 (ACTUAL), 0 (DESIRED)\n'
                         'Max absolute difference among violations: '
                         '563766.\n'
                         'Max relative difference among violations: '
@@ -350,8 +392,8 @@ def test_build_err_msg_custom_precision(self):

 class TestEqual(TestArrayEqual):

-    def setup_method(self):
-        self._assert_func = assert_equal
+    def _assert_func(self, *args, **kwargs):
+        assert_equal(*args, **kwargs)

     def test_nan_items(self):
         self._assert_func(np.nan, np.nan)
@@ -445,8 +487,8 @@ def test_object(self):

 class TestArrayAlmostEqual(_GenericTest):

-    def setup_method(self):
-        self._assert_func = assert_array_almost_equal
+    def _assert_func(self, *args, **kwargs):
+        assert_array_almost_equal(*args, **kwargs)

     def test_closeness(self):
         # Note that in the course of time we ended up with
@@ -466,6 +508,8 @@ def test_closeness(self):
             self._assert_func([1.499999], [0.0], decimal=0)

         expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
+                        'Mismatch at index:\n'
+                        ' [0]: 1.5 (ACTUAL), 0.0 (DESIRED)\n'
                         'Max absolute difference among violations: 1.5\n'
                         'Max relative difference among violations: inf')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -474,12 +518,16 @@ def test_closeness(self):
         a = [1.4999999, 0.00003]
         b = [1.49999991, 0]
         expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+                        'Mismatch at index:\n'
+                        ' [1]: 3e-05 (ACTUAL), 0.0 (DESIRED)\n'
                         'Max absolute difference among violations: 3.e-05\n'
                         'Max relative difference among violations: inf')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
             self._assert_func(a, b, decimal=7)

         expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+                        'Mismatch at index:\n'
+                        ' [1]: 0.0 (ACTUAL), 3e-05 (DESIRED)\n'
                         'Max absolute difference among violations: 3.e-05\n'
                         'Max relative difference among violations: 1.')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -493,6 +541,8 @@ def test_simple(self):
         self._assert_func(x, y, decimal=4)

         expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
+                        'Mismatch at index:\n'
+                        ' [0]: 1234.2222 (ACTUAL), 1234.2223 (DESIRED)\n'
                         'Max absolute difference among violations: '
                         '1.e-04\n'
                         'Max relative difference among violations: '
@@ -504,6 +554,9 @@ def test_array_vs_scalar(self):
         a = [5498.42354, 849.54345, 0.00]
         b = 5498.42354
         expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n'
+                        'Mismatch at indices:\n'
+                        ' [1]: 849.54345 (ACTUAL), 5498.42354 (DESIRED)\n'
+                        ' [2]: 0.0 (ACTUAL), 5498.42354 (DESIRED)\n'
                         'Max absolute difference among violations: '
                         '5498.42354\n'
                         'Max relative difference among violations: 1.')
@@ -511,6 +564,9 @@ def test_array_vs_scalar(self):
             self._assert_func(a, b, decimal=9)

         expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n'
+                        'Mismatch at indices:\n'
+                        ' [1]: 5498.42354 (ACTUAL), 849.54345 (DESIRED)\n'
+                        ' [2]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n'
                         'Max absolute difference among violations: '
                         '5498.42354\n'
                         'Max relative difference among violations: 5.4722099')
@@ -519,6 +575,8 @@ def test_array_vs_scalar(self):
         a = [5498.42354, 0.00]

         expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+                        'Mismatch at index:\n'
+                        ' [1]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n'
                         'Max absolute difference among violations: '
                         '5498.42354\n'
                         'Max relative difference among violations: inf')
@@ -527,6 +585,8 @@ def test_array_vs_scalar(self):
         b = 0

         expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+                        'Mismatch at index:\n'
+                        ' [0]: 5498.42354 (ACTUAL), 0 (DESIRED)\n'
                         'Max absolute difference among violations: '
                         '5498.42354\n'
                         'Max relative difference among violations: inf')
@@ -555,6 +615,18 @@ def test_inf(self):

         assert_raises(AssertionError, lambda: self._assert_func(a, b))

+    def test_complex_inf(self):
+        a = np.array([np.inf + 1.j, 2. + 1.j, 3. + 1.j])
+        b = a.copy()
+        self._assert_func(a, b)
+        b[1] = 3. + 1.j
+        expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
+                        'Mismatch at index:\n'
+                        ' [1]: (2+1j) (ACTUAL), (3+1j) (DESIRED)\n'
+                        'Max absolute difference among violations: 1.\n')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(a, b)
+
     def test_subclass(self):
         a = np.array([[1., 2.], [3., 4.]])
         b = np.ma.masked_array([[1., 2.], [0., 4.]],
@@ -603,6 +675,8 @@ def all(self, *args, **kwargs):
         all(z)
         b = np.array([1., 202]).view(MyArray)
         expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+                        'Mismatch at index:\n'
+                        ' [1]: 2.0 (ACTUAL), 202.0 (DESIRED)\n'
                         'Max absolute difference among violations: 200.\n'
                         'Max relative difference among violations: 0.99009')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -629,8 +703,8 @@ def all(self, *args, **kwargs):

 class TestAlmostEqual(_GenericTest):

-    def setup_method(self):
-        self._assert_func = assert_almost_equal
+    def _assert_func(self, *args, **kwargs):
+        assert_almost_equal(*args, **kwargs)

     def test_closeness(self):
         # Note that in the course of time we ended up with
@@ -693,6 +767,10 @@ def test_error_message(self):

         # Test with a different amount of decimal digits
         expected_msg = ('Mismatched elements: 3 / 3 (100%)\n'
+                        'Mismatch at indices:\n'
+                        ' [0]: 1.00000000001 (ACTUAL), 1.00000000002 (DESIRED)\n'
+                        ' [1]: 2.00000000002 (ACTUAL), 2.00000000003 (DESIRED)\n'
+                        ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n'
                         'Max absolute difference among violations: 1.e-05\n'
                         'Max relative difference among violations: '
                         '3.33328889e-06\n'
@@ -708,6 +786,8 @@ def test_error_message(self):
         # differs. Note that we only check for the formatting of the arrays
         # themselves.
         expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
+                        'Mismatch at index:\n'
+                        ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n'
                         'Max absolute difference among violations: 1.e-05\n'
                         'Max relative difference among violations: '
                         '3.33328889e-06\n'
@@ -720,6 +800,8 @@ def test_error_message(self):
         x = np.array([np.inf, 0])
         y = np.array([np.inf, 1])
         expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+                        'Mismatch at index:\n'
+                        ' [1]: 0.0 (ACTUAL), 1.0 (DESIRED)\n'
                         'Max absolute difference among violations: 1.\n'
                         'Max relative difference among violations: 1.\n'
                         ' ACTUAL: array([inf, 0.])\n'
@@ -731,6 +813,9 @@ def test_error_message(self):
         x = np.array([1, 2])
         y = np.array([0, 0])
         expected_msg = ('Mismatched elements: 2 / 2 (100%)\n'
+                        'Mismatch at indices:\n'
+                        ' [0]: 1 (ACTUAL), 0 (DESIRED)\n'
+                        ' [1]: 2 (ACTUAL), 0 (DESIRED)\n'
                         'Max absolute difference among violations: 2\n'
                         'Max relative difference among violations: inf')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -742,6 +827,12 @@ def test_error_message_2(self):
         x = 2
         y = np.ones(20)
         expected_msg = ('Mismatched elements: 20 / 20 (100%)\n'
+                        'First 5 mismatches are at indices:\n'
+                        ' [0]: 2 (ACTUAL), 1.0 (DESIRED)\n'
+                        ' [1]: 2 (ACTUAL), 1.0 (DESIRED)\n'
+                        ' [2]: 2 (ACTUAL), 1.0 (DESIRED)\n'
+                        ' [3]: 2 (ACTUAL), 1.0 (DESIRED)\n'
+                        ' [4]: 2 (ACTUAL), 1.0 (DESIRED)\n'
                         'Max absolute difference among violations: 1.\n'
                         'Max relative difference among violations: 1.')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -750,6 +841,12 @@ def test_error_message_2(self):
         y = 2
         x = np.ones(20)
         expected_msg = ('Mismatched elements: 20 / 20 (100%)\n'
+                        'First 5 mismatches are at indices:\n'
+                        ' [0]: 1.0 (ACTUAL), 2 (DESIRED)\n'
+                        ' [1]: 1.0 (ACTUAL), 2 (DESIRED)\n'
+                        ' [2]: 1.0 (ACTUAL), 2 (DESIRED)\n'
+                        ' [3]: 1.0 (ACTUAL), 2 (DESIRED)\n'
+                        ' [4]: 1.0 (ACTUAL), 2 (DESIRED)\n'
                         'Max absolute difference among violations: 1.\n'
                         'Max relative difference among violations: 0.5')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -776,8 +873,8 @@ def all(self, *args, **kwargs):

 class TestApproxEqual:

-    def setup_method(self):
-        self._assert_func = assert_approx_equal
+    def _assert_func(self, *args, **kwargs):
+        assert_approx_equal(*args, **kwargs)

     def test_simple_0d_arrays(self):
         x = np.array(1234.22)
@@ -819,8 +916,8 @@ def test_nan_items(self):

 class TestArrayAssertLess:

-    def setup_method(self):
-        self._assert_func = assert_array_less
+    def _assert_func(self, *args, **kwargs):
+        assert_array_less(*args, **kwargs)

     def test_simple_arrays(self):
         x = np.array([1.1, 2.2])
@@ -838,6 +935,9 @@ def test_simple_arrays(self):
         b = np.array([2, 4, 6, 8])

         expected_msg = ('Mismatched elements: 2 / 4 (50%)\n'
+                        'Mismatch at indices:\n'
+                        ' [2]: 6 (x), 6 (y)\n'
+                        ' [3]: 20 (x), 8 (y)\n'
                         'Max absolute difference among violations: 12\n'
                         'Max relative difference among violations: 1.5')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -849,6 +949,11 @@ def test_rank2(self):
         self._assert_func(x, y)

         expected_msg = ('Mismatched elements: 4 / 4 (100%)\n'
+                        'Mismatch at indices:\n'
+                        ' [0, 0]: 1.2 (x), 1.1 (y)\n'
+                        ' [0, 1]: 2.3 (x), 2.2 (y)\n'
+                        ' [1, 0]: 3.4 (x), 3.3 (y)\n'
+                        ' [1, 1]: 4.5 (x), 4.4 (y)\n'
                         'Max absolute difference among violations: 0.1\n'
                         'Max relative difference among violations: 0.09090909')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -867,6 +972,8 @@ def test_rank3(self):
         y[0, 0, 0] = 0

         expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n'
+                        'Mismatch at index:\n'
+                        ' [0, 0, 0]: 1.0 (x), 0.0 (y)\n'
                         'Max absolute difference among violations: 1.\n'
                         'Max relative difference among violations: inf')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -910,12 +1017,20 @@ def test_simple_items_and_array(self):
         y = 999090.54

         expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n'
+                        'Mismatch at index:\n'
+                        ' [1, 1]: 999090.54 (x), 999090.54 (y)\n'
                         'Max absolute difference among violations: 0.\n'
                         'Max relative difference among violations: 0.')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
             self._assert_func(x, y)

         expected_msg = ('Mismatched elements: 12 / 12 (100%)\n'
+                        'First 5 mismatches are at indices:\n'
+                        ' [0, 0]: 999090.54 (x), 3.4536 (y)\n'
+                        ' [0, 1]: 999090.54 (x), 2390.5436 (y)\n'
+                        ' [0, 2]: 999090.54 (x), 435.54657 (y)\n'
+                        ' [0, 3]: 999090.54 (x), 324525.4535 (y)\n'
+                        ' [1, 0]: 999090.54 (x), 5449.54 (y)\n'
                         'Max absolute difference among violations: '
                         '999087.0864\n'
                         'Max relative difference among violations: '
@@ -928,12 +1043,17 @@ def test_zeroes(self):
         y = np.array(87654.)

         expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
+                        'Mismatch at index:\n'
+                        ' [0]: 546456.0 (x), 87654.0 (y)\n'
                         'Max absolute difference among violations: 458802.\n'
                         'Max relative difference among violations: 5.23423917')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
             self._assert_func(x, y)

         expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n'
+                        'Mismatch at indices:\n'
+                        ' [1]: 87654.0 (x), 0.0 (y)\n'
+                        ' [2]: 87654.0 (x), 15.455 (y)\n'
                         'Max absolute difference among violations: 87654.\n'
                         'Max relative difference among violations: '
                         '5670.5626011')
@@ -943,12 +1063,18 @@ def test_zeroes(self):
         y = 0

         expected_msg = ('Mismatched elements: 3 / 3 (100%)\n'
+                        'Mismatch at indices:\n'
+                        ' [0]: 546456.0 (x), 0 (y)\n'
+                        ' [1]: 0.0 (x), 0 (y)\n'
+                        ' [2]: 15.455 (x), 0 (y)\n'
                         'Max absolute difference among violations: 546456.\n'
                         'Max relative difference among violations: inf')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
             self._assert_func(x, y)

         expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
+                        'Mismatch at index:\n'
+                        ' [1]: 0 (x), 0.0 (y)\n'
                         'Max absolute difference among violations: 0.\n'
                         'Max relative difference among violations: inf')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -1017,7 +1143,10 @@ def test_strict(self):
         with pytest.raises(AssertionError):
             self._assert_func(x, y.astype(np.float32), strict=True)

-
+@pytest.mark.filterwarnings(
+    "ignore:.*NumPy warning suppression and assertion utilities are deprecated"
+    ".*:DeprecationWarning")
+@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings")
 class TestWarns:

     def test_warn(self):
@@ -1134,12 +1263,16 @@ def test_simple(self):
         b = np.array([x, y, x, x])
         c = np.array([x, y, x, z])
         expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
+                        'Mismatch at index:\n'
+                        ' [3]: 0.001 (ACTUAL), 0.0 (DESIRED)\n'
                         'Max absolute difference among violations: 0.001\n'
                         'Max relative difference among violations: inf')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
             assert_allclose(b, c)

         expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
+                        'Mismatch at index:\n'
+                        ' [3]: 0.0 (ACTUAL), 0.001 (DESIRED)\n'
                         'Max absolute difference among violations: 0.001\n'
                         'Max relative difference among violations: 1.')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -1155,6 +1288,8 @@ def test_report_fail_percentage(self):
         b = np.array([1, 1, 1, 2])
         expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
+                        'Mismatch at index:\n'
+                        ' [3]: 1 (ACTUAL), 2 (DESIRED)\n'
                         'Max absolute difference among violations: 1\n'
                         'Max relative difference among violations: 0.5')
         with pytest.raises(AssertionError, match=re.escape(expected_msg)):
@@ -1166,11 +1301,21 @@ def test_equal_nan(self):
         # Should not raise:
         assert_allclose(a, b, equal_nan=True)

+        a = np.array([complex(np.nan, np.inf)])
+        b = np.array([complex(np.nan, np.inf)])
+        assert_allclose(a, b, equal_nan=True)
+        b = np.array([complex(np.nan, -np.inf)])
+        assert_allclose(a, b, equal_nan=True)
+
     def test_not_equal_nan(self):
         a = np.array([np.nan])
         b = np.array([np.nan])
         assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)

+        a = np.array([complex(np.nan, np.inf)])
+        b = np.array([complex(np.nan, np.inf)])
+        assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
+
     def test_equal_nan_default(self):
         # Make sure equal_nan default behavior remains unchanged. (All
         # of these functions use assert_array_compare under the hood.)
@@ -1219,6 +1364,33 @@ def test_strict(self):
         with pytest.raises(AssertionError):
             assert_allclose(x, x.astype(np.float32), strict=True)

+    def test_infs(self):
+        a = np.array([np.inf])
+        b = np.array([np.inf])
+        assert_allclose(a, b)
+
+        b = np.array([3.])
+        expected_msg = 'inf location mismatch:'
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            assert_allclose(a, b)
+
+        b = np.array([-np.inf])
+        expected_msg = 'inf values mismatch:'
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            assert_allclose(a, b)
+
+        b = np.array([complex(np.inf, 1.)])
+        expected_msg = 'inf values mismatch:'
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            assert_allclose(a, b)
+
+        a = np.array([complex(np.inf, 1.)])
+        b = np.array([complex(np.inf, 1.)])
+        assert_allclose(a, b)
+
+        b = np.array([complex(np.inf, 2.)])
+        expected_msg = 'inf values mismatch:'
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            assert_allclose(a, b)

 class TestArrayAlmostEqualNulp:
@@ -1589,6 +1761,7 @@ def _get_fresh_mod():
     return my_mod

+@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings")
 def test_clear_and_catch_warnings():
     # Initial state of module, no warnings
     my_mod = _get_fresh_mod()
@@ -1621,6 +1794,10 @@ def test_clear_and_catch_warnings():
     assert_warn_len_equal(my_mod, 0)

+@pytest.mark.filterwarnings(
+    "ignore:.*NumPy warning suppression and assertion utilities are deprecated"
+    ".*:DeprecationWarning")
+@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings")
 def test_suppress_warnings_module():
     # Initial state of module, no warnings
     my_mod = _get_fresh_mod()
@@ -1667,6 +1844,10 @@ def warn(arr):
     assert_warn_len_equal(my_mod, 0)

+@pytest.mark.filterwarnings(
+    "ignore:.*NumPy warning suppression and assertion utilities are deprecated"
+    ".*:DeprecationWarning")
+@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings")
 def test_suppress_warnings_type():
     # Initial state of module, no warnings
     my_mod = _get_fresh_mod()
@@ -1695,6 +1876,12 @@ def test_suppress_warnings_type():
     assert_warn_len_equal(my_mod, 0)

+@pytest.mark.filterwarnings(
+    "ignore:.*NumPy warning suppression and assertion utilities are deprecated"
+    ".*:DeprecationWarning")
+@pytest.mark.thread_unsafe(
+    reason="uses deprecated thread-unsafe warnings control utilities"
+)
 def test_suppress_warnings_decorate_no_record():
     sup = suppress_warnings()
     sup.filter(UserWarning)
@@ -1710,6 +1897,12 @@ def warn(category):
     assert_equal(len(w), 1)

+@pytest.mark.filterwarnings(
+    "ignore:.*NumPy warning suppression and assertion utilities are deprecated"
+    ".*:DeprecationWarning")
+@pytest.mark.thread_unsafe(
+    reason="uses deprecated thread-unsafe warnings control utilities"
+)
 def test_suppress_warnings_record():
     sup = suppress_warnings()
     log1 = sup.record()
@@ -1747,9 +1940,16 @@ def test_suppress_warnings_record():
             warnings.warn('Some warning')
             warnings.warn('Some other warning')
             assert_equal(len(sup2.log), 1)
-        assert_equal(len(sup.log), 1)
+        # includes a DeprecationWarning for suppress_warnings
+        assert_equal(len(sup.log), 2)

+@pytest.mark.filterwarnings(
+    "ignore:.*NumPy warning suppression and assertion utilities are deprecated"
+    ".*:DeprecationWarning")
+@pytest.mark.thread_unsafe(
+    reason="uses deprecated thread-unsafe warnings control utilities"
+)
 def test_suppress_warnings_forwarding():
     def warn_other_module():
         # Apply along axis is implemented in python; stacklevel=2 means
@@ -1765,7 +1965,8 @@ def warn(arr):
             for i in range(2):
                 warnings.warn("Some warning")

-        assert_equal(len(sup.log), 2)
+        # includes a DeprecationWarning for suppress_warnings
+        assert_equal(len(sup.log), 3)

     with suppress_warnings() as sup:
         sup.record()
@@ -1774,7 +1975,8 @@ def warn(arr):
             warnings.warn("Some warning")
             warnings.warn("Some warning")

-        assert_equal(len(sup.log), 2)
+        # includes a DeprecationWarning for suppress_warnings
+        assert_equal(len(sup.log), 3)

     with suppress_warnings() as sup:
         sup.record()
@@ -1784,7 +1986,8 @@ def warn(arr):
             warnings.warn("Some warning")
             warn_other_module()

-        assert_equal(len(sup.log), 2)
+        # includes a DeprecationWarning for suppress_warnings
+        assert_equal(len(sup.log), 3)

     with suppress_warnings() as sup:
         sup.record()
@@ -1794,7 +1997,8 @@ def warn(arr):
             warnings.warn("Some other warning")
             warn_other_module()

-        assert_equal(len(sup.log), 2)
+        # includes a DeprecationWarning for suppress_warnings
+        assert_equal(len(sup.log), 3)

 def test_tempdir():
@@ -1835,6 +2039,7 @@ class my_cacw(clear_and_catch_warnings):
     class_modules = (sys.modules[__name__],)

+@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings")
 def test_clear_and_catch_warnings_inherit():
     # Test can subclass and add default modules
     my_mod = _get_fresh_mod()
@@ -1845,6 +2050,7 @@ def test_clear_and_catch_warnings_inherit():

 @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.thread_unsafe(reason="garbage collector is global state")
 class TestAssertNoGcCycles:
     """ Test assert_no_gc_cycles """
diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py
index e0b9bb1b7aff..917bbf55712f 100644
--- a/numpy/tests/test_configtool.py
+++ b/numpy/tests/test_configtool.py
@@ -1,4 +1,3 @@
-import importlib
 import importlib.metadata
 import os
 import pathlib
@@ -15,8 +14,10 @@
 PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig'

-@pytest.mark.skipif(not IS_INSTALLED, reason="`numpy-config` not expected to be installed")
-@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess")
+@pytest.mark.skipif(not IS_INSTALLED,
+                    reason="`numpy-config` not expected to be installed")
+@pytest.mark.skipif(IS_WASM,
+                    reason="wasm interpreter cannot start subprocess")
 class TestNumpyConfig:
     def check_numpyconfig(self, arg):
         p = subprocess.run(['numpy-config', arg], capture_output=True, text=True)
@@ -33,16 +34,18 @@ def test_configtool_cflags(self):

     def test_configtool_pkgconfigdir(self):
         stdout = self.check_numpyconfig('--pkgconfigdir')
-        assert pathlib.Path(stdout) == PKG_CONFIG_DIR
+        assert pathlib.Path(stdout) == PKG_CONFIG_DIR.resolve()

-@pytest.mark.skipif(not IS_INSTALLED, reason="numpy must be installed to check its entrypoints")
+@pytest.mark.skipif(not IS_INSTALLED,
+                    reason="numpy must be installed to check its entrypoints")
 def test_pkg_config_entrypoint():
     (entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy')
     assert entrypoint.value == numpy._core.lib.pkgconfig.__name__

-@pytest.mark.skipif(not IS_INSTALLED, reason="numpy.pc is only available when numpy is installed")
+@pytest.mark.skipif(not IS_INSTALLED,
+                    reason="numpy.pc is only available when numpy is installed")
 @pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc")
 def test_pkg_config_config_exists():
     assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file()
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index 68d31416040b..b88910ce457e 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -124,6 +124,7 @@ def test_flags(self):
         assert_(p.from_param(x))
         assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))

+    @pytest.mark.thread_unsafe(reason="checks that global ndpointer cache is updating")
     def test_cache(self):
         assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))

@@ -178,6 +179,7 @@ def test_return(self, dt):
             arr.__array_interface__['data']
         )

+    @pytest.mark.thread_unsafe(reason="mutates global test vars")
    def test_vague_return_value(self):
         """ Test that vague ndpointer return values do not promote to arrays """
         arr = np.zeros((2, 3))
@@ -252,6 +254,7 @@ def check(x):
         check(as_array(pointer(c_array[0]), shape=(2,)))
         check(as_array(pointer(c_array[0][0]), shape=(2, 3)))

+    @pytest.mark.thread_unsafe(reason="garbage collector is global state")
     def test_reference_cycles(self):
         # related to gh-6511
         import ctypes
@@ -302,6 +305,7 @@ def test_scalar(self):
         ct = np.ctypeslib.as_ctypes_type(dt)
         assert_equal(ct, ctypes.c_uint16)

+    @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)")
     def test_subarray(self):
         dt = np.dtype((np.int32, (2, 3)))
         ct = np.ctypeslib.as_ctypes_type(dt)
@@ -321,6 +325,7 @@ def test_structure(self):
             ('b', ctypes.c_uint32),
         ])

+    @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)")
     def test_structure_aligned(self):
         dt = np.dtype([
             ('a', np.uint16),
@@ -351,6 +356,7 @@ def test_union(self):
             ('b', ctypes.c_uint32),
         ])

+    @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)")
     def test_padded_union(self):
         dt = np.dtype({
             'names': ['a', 'b'],
diff --git a/numpy/tests/test_lazyloading.py b/numpy/tests/test_lazyloading.py
index 5f6233f1c5cb..7b0324802611 100644
--- a/numpy/tests/test_lazyloading.py
+++ b/numpy/tests/test_lazyloading.py
@@ -1,23 +1,23 @@
+import subprocess
 import sys
-from importlib.util import LazyLoader, find_spec, module_from_spec
+import textwrap

 import pytest

+from numpy.testing import IS_WASM

-# Warning raised by _reload_guard() in numpy/__init__.py
-@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded")
+
+@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
 def test_lazy_load():
     # gh-22045. lazyload doesn't import submodule names into the namespace
-    # muck with sys.modules to test the importing system
-    old_numpy = sys.modules.pop("numpy")
-    numpy_modules = {}
-    for mod_name, mod in list(sys.modules.items()):
-        if mod_name[:6] == "numpy.":
-            numpy_modules[mod_name] = mod
-            sys.modules.pop(mod_name)
+    # Test within a new process, to ensure that we do not mess with the
+    # global state during the test run (could lead to cryptic test failures).
+    # This is generally unsafe, especially since we also reload the C-modules.
+    code = textwrap.dedent(r"""
+        import sys
+        from importlib.util import LazyLoader, find_spec, module_from_spec

-    try:
         # create lazy load of numpy as np
         spec = find_spec("numpy")
         module = module_from_spec(spec)
@@ -31,8 +31,12 @@ def test_lazy_load():

         # test triggering the import of the package
         np.ndarray
-
-    finally:
-        if old_numpy:
-            sys.modules["numpy"] = old_numpy
-            sys.modules.update(numpy_modules)
+        """)
+    p = subprocess.run(
+        (sys.executable, '-c', code),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        encoding='utf-8',
+        check=False,
+    )
+    assert p.returncode == 0, p.stdout
diff --git a/numpy/tests/test_numpy_config.py b/numpy/tests/test_numpy_config.py
index f01a279574a5..9219379b2552 100644
--- a/numpy/tests/test_numpy_config.py
+++ b/numpy/tests/test_numpy_config.py
@@ -21,6 +21,7 @@ class TestNumPyConfigs:
     ]

     @patch("numpy.__config__._check_pyyaml")
+    @pytest.mark.thread_unsafe(reason="unittest.mock.patch updates global state")
     def test_pyyaml_not_found(self, mock_yaml_importer):
         mock_yaml_importer.side_effect = ModuleNotFoundError()
         with pytest.warns(UserWarning):
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index a56cd13296e3..3ccba81ebaff 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -141,17 +141,6 @@ def test_NPY_NO_EXPORT():
     "typing.mypy_plugin",
     "version",
 ]]
-if sys.version_info < (3, 12):
-    PUBLIC_MODULES += [
-        'numpy.' + s for s in [
-            "distutils",
-            "distutils.cpuinfo",
-            "distutils.exec_command",
-            "distutils.misc_util",
-            "distutils.log",
-            "distutils.system_info",
-        ]
-    ]

 PUBLIC_ALIASED_MODULES = [
@@ -190,10 +179,8 @@ def test_NPY_NO_EXPORT():
     "f2py.rules",
     "f2py.symbolic",
     "f2py.use_rules",
-    "fft.helper",
     "lib.user_array",  # note: not in np.lib, but probably should just be deleted
     "linalg.lapack_lite",
-    "linalg.linalg",
     "ma.core",
     "ma.testutils",
     "matlib",
@@ -204,66 +191,6 @@ def test_NPY_NO_EXPORT():
     "random.bit_generator",
     "testing.print_coercion_tables",
 ]]
-if sys.version_info < (3, 12):
-    PRIVATE_BUT_PRESENT_MODULES += [
-        'numpy.' + s for s in [
-            "distutils.armccompiler",
-            "distutils.fujitsuccompiler",
-            "distutils.ccompiler",
-            'distutils.ccompiler_opt',
-            "distutils.command",
-            "distutils.command.autodist",
-            "distutils.command.bdist_rpm",
-            "distutils.command.build",
-            "distutils.command.build_clib",
-            "distutils.command.build_ext",
-            "distutils.command.build_py",
-            "distutils.command.build_scripts",
-            "distutils.command.build_src",
-            "distutils.command.config",
-            "distutils.command.config_compiler",
-            "distutils.command.develop",
-            "distutils.command.egg_info",
-            "distutils.command.install",
-            "distutils.command.install_clib",
-            "distutils.command.install_data",
-            "distutils.command.install_headers",
-            "distutils.command.sdist",
-            "distutils.conv_template",
-            "distutils.core",
-            "distutils.extension",
-            "distutils.fcompiler",
-            "distutils.fcompiler.absoft",
-            "distutils.fcompiler.arm",
-            "distutils.fcompiler.compaq",
-            "distutils.fcompiler.environment",
-            "distutils.fcompiler.g95",
-            "distutils.fcompiler.gnu",
-            "distutils.fcompiler.hpux",
-            "distutils.fcompiler.ibm",
-            "distutils.fcompiler.intel",
-            "distutils.fcompiler.lahey",
-            "distutils.fcompiler.mips",
-            "distutils.fcompiler.nag",
-            "distutils.fcompiler.none",
-            "distutils.fcompiler.pathf95",
-            "distutils.fcompiler.pg",
-            "distutils.fcompiler.nv",
-            "distutils.fcompiler.sun",
-            "distutils.fcompiler.vast",
-            "distutils.fcompiler.fujitsu",
-            "distutils.from_template",
-            "distutils.intelccompiler",
-            "distutils.lib2def",
-            "distutils.line_endings",
-            "distutils.mingw32ccompiler",
-            "distutils.msvccompiler",
-            "distutils.npy_pkg_config",
-            "distutils.numpy_distribution",
-            "distutils.pathccompiler",
-            "distutils.unixccompiler",
-        ]
-    ]

 def is_unexpected(name):
@@ -276,12 +203,6 @@ def is_unexpected(name):
     )

-if sys.version_info >= (3, 12):
-    SKIP_LIST = []
-else:
-    SKIP_LIST = ["numpy.distutils.msvc9compiler"]
-
-
 def test_all_modules_are_expected():
     """
     Test that we don't add anything that looks like a new public module by
@@ -292,7 +213,7 @@ def test_all_modules_are_expected():
     for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__,
                                                    prefix=np.__name__ + '.',
                                                    onerror=None):
-        if is_unexpected(modname) and modname not in SKIP_LIST:
+        if is_unexpected(modname):
            # We have a name that is new.  If that's on purpose, add it to
            # PUBLIC_MODULES.  We don't expect to have to add anything to
            # PRIVATE_BUT_PRESENT_MODULES.  Use an underscore in the name!
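[Reviewer note: the discovery step that test_all_modules_are_expected relies on can be reproduced standalone. A minimal sketch, assuming only an installed NumPy; the print is purely illustrative and not part of the patch:

    import pkgutil
    import numpy as np

    # Mirror the walk in the test: visit every importable numpy submodule,
    # which the test then screens against the allowlists above.
    for _, modname, _ in pkgutil.walk_packages(path=np.__path__,
                                               prefix=np.__name__ + '.',
                                               onerror=None):
        print(modname)
]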
@@ -317,12 +238,6 @@ def test_all_modules_are_expected():
     'numpy.matlib.ctypeslib',
     'numpy.matlib.ma',
 ]
-if sys.version_info < (3, 12):
-    SKIP_LIST_2 += [
-        'numpy.distutils.log.sys',
-        'numpy.distutils.log.logging',
-        'numpy.distutils.log.warnings',
-    ]

 def test_all_modules_are_expected_2():
@@ -780,7 +695,7 @@ def test___qualname___and___module___attribute():
             inspect.ismodule(member) and  # it's a module
             "numpy" in member.__name__ and  # inside NumPy
             not member_name.startswith("_") and  # not private
-            member_name not in {"tests", "typing"} and  # 2024-12: type names don't match
+            member_name not in {"tests", "typing"} and  # type names don't match
             "numpy._core" not in member.__name__ and  # outside _core
             member not in visited_modules  # not visited yet
         ):
diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py
index c21dc007b232..aa87ae104318 100644
--- a/numpy/tests/test_reloading.py
+++ b/numpy/tests/test_reloading.py
@@ -7,15 +7,10 @@
 import pytest

 import numpy.exceptions as ex
-from numpy.testing import (
-    IS_WASM,
-    assert_,
-    assert_equal,
-    assert_raises,
-    assert_warns,
-)
+from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises

+@pytest.mark.thread_unsafe(reason="reloads global module")
 def test_numpy_reloading():
     # gh-7844. Also check that relevant globals retain their identity.
     import numpy as np
@@ -25,14 +20,14 @@ def test_numpy_reloading():
     VisibleDeprecationWarning = ex.VisibleDeprecationWarning
     ModuleDeprecationWarning = ex.ModuleDeprecationWarning

-    with assert_warns(UserWarning):
+    with pytest.warns(UserWarning):
         reload(np)
     assert_(_NoValue is np._NoValue)
     assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning)
     assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning)

     assert_raises(RuntimeError, reload, numpy._globals)
-    with assert_warns(UserWarning):
+    with pytest.warns(UserWarning):
         reload(np)
     assert_(_NoValue is np._NoValue)
     assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning)
@@ -48,27 +43,34 @@ def test_novalue():

 @pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
 def test_full_reimport():
-    """At the time of writing this, it is *not* truly supported, but
-    apparently enough users rely on it, for it to be an annoying change
-    when it started failing previously.
-    """
+    # Reimporting numpy like this is not safe due to use of global C state,
+    # and has unexpected side effects. Test that an ImportError is raised.
+    # When all extension modules are isolated, this should test that clearing
+    # sys.modules and reimporting numpy works without error.
+
     # Test within a new process, to ensure that we do not mess with the
     # global state during the test run (could lead to cryptic test failures).
     # This is generally unsafe, especially, since we also reload the C-modules.
     code = textwrap.dedent(r"""
         import sys
-        from pytest import warns
         import numpy as np

-        for k in list(sys.modules.keys()):
-            if "numpy" in k:
-                del sys.modules[k]
+        for k in [k for k in sys.modules if k.startswith('numpy')]:
+            del sys.modules[k]

-        with warns(UserWarning):
+        try:
             import numpy as np
+        except ImportError as err:
+            if str(err) != "cannot load module more than once per process":
+                raise SystemExit(f"Unexpected ImportError: {err}")
+        else:
+            raise SystemExit("DID NOT RAISE ImportError")
         """)
-    p = subprocess.run([sys.executable, '-c', code], capture_output=True)
-    if p.returncode:
-        raise AssertionError(
-            f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
-        )
+    p = subprocess.run(
+        (sys.executable, '-c', code),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        encoding='utf-8',
+        check=False,
+    )
+    assert p.returncode == 0, p.stdout
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index d8ce95887bce..e5f0a07436c8 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -5,16 +5,13 @@
 import os
 import subprocess
 import sys
-from os.path import dirname, isfile
-from os.path import join as pathjoin
+from os.path import dirname

 import pytest

 import numpy as np
 from numpy.testing import IS_WASM, assert_equal

-is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))

 def find_f2py_commands():
     if sys.platform == 'win32':
@@ -34,7 +31,6 @@ def find_f2py_commands():
     return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor]

-@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
 @pytest.mark.xfail(reason="Test is unreliable")
 @pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
 def test_f2py(f2py_cmd):
diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py
index 560ee6143265..7efa2a1d1896 100644
--- a/numpy/tests/test_warnings.py
+++ b/numpy/tests/test_warnings.py
@@ -34,10 +34,11 @@ def visit_Call(self, node):
             ast.NodeVisitor.generic_visit(self, node)

         if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
-            if node.args[0].value == "ignore":
-                raise AssertionError(
-                    "warnings should have an appropriate stacklevel; "
-                    f"found in {self.__filename} on line {node.lineno}")
+            if getattr(node.args[0], "value", None) == "ignore":
+                if not self.__filename.name.startswith("test_"):
+                    raise AssertionError(
+                        "ignore filters should only be used in tests; "
+                        f"found in {self.__filename} on line {node.lineno}")

         if p.ls[-1] == 'warn' and (
                 len(p.ls) == 1 or p.ls[-2] == 'warnings'):
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 173c094b40aa..ef4c0885257b 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -104,13 +104,45 @@
     >>> import numpy.typing as npt
     >>> T = TypeVar("T", bound=npt.NBitBase)

-    >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]":
+    >>> def func(a: np.floating[T], b: np.floating[T]) -> np.floating[T]:
     ...     ...

 Consequently, the likes of `~numpy.float16`, `~numpy.float32` and
 `~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to
 runtime, they're not necessarily considered as sub-classes.

+.. deprecated:: 2.3
+    The :class:`~numpy.typing.NBitBase` helper is deprecated and will be
+    removed in a future release. Prefer expressing precision relationships via
+    ``typing.overload`` or ``TypeVar`` definitions bounded by concrete scalar
+    classes. For example:
+
+    .. code-block:: python
+
+        from typing import TypeVar
+        import numpy as np
+
+        S = TypeVar("S", bound=np.floating)
+
+        def func(a: S, b: S) -> S:
+            ...
+
+    or in the case of different input types mapping to different output types:
+
+    .. code-block:: python
+
+        from typing import overload
+        import numpy as np
+
+        @overload
+        def phase(x: np.complex64) -> np.float32: ...
+        @overload
+        def phase(x: np.complex128) -> np.float64: ...
+        @overload
+        def phase(x: np.clongdouble) -> np.longdouble: ...
+        def phase(x: np.complexfloating) -> np.floating:
+            ...
+
 Timedelta64
 ~~~~~~~~~~~

@@ -169,7 +201,7 @@ def __dir__() -> list[str]:
     return __DIR

-def __getattr__(name: str):
+def __getattr__(name: str) -> object:
     if name == "NBitBase":
         import warnings
diff --git a/numpy/typing/__init__.pyi b/numpy/typing/__init__.pyi
new file mode 100644
index 000000000000..5af10da218d9
--- /dev/null
+++ b/numpy/typing/__init__.pyi
@@ -0,0 +1,8 @@
+from numpy._typing import (  # type: ignore[deprecated]
+    ArrayLike,
+    DTypeLike,
+    NBitBase,
+    NDArray,
+)
+
+__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"]
diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py
index dc1e2564fc32..21aca2bc69ef 100644
--- a/numpy/typing/mypy_plugin.py
+++ b/numpy/typing/mypy_plugin.py
@@ -18,6 +18,11 @@
 .. versionadded:: 1.22

 .. deprecated:: 2.3
+    The :mod:`numpy.typing.mypy_plugin` entry-point is deprecated in favor of
+    platform-agnostic static type inference. Remove
+    ``numpy.typing.mypy_plugin`` from the ``plugins`` section of your mypy
+    configuration; if that surfaces new errors, please open an issue with a
+    minimal reproducer.

 Examples
 --------
@@ -34,7 +39,7 @@
 """

 from collections.abc import Callable, Iterable
-from typing import TYPE_CHECKING, Final, TypeAlias, cast
+from typing import TYPE_CHECKING, Final, cast

 import numpy as np
@@ -110,7 +115,7 @@ def plugin(version: str) -> type:

 else:
-    _HookFunc: TypeAlias = Callable[[AnalyzeTypeContext], mypy.types.Type]
+    type _HookFunc = Callable[[AnalyzeTypeContext], mypy.types.Type]

     def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type:
         """Replace a type-alias with a concrete ``NBitBase`` subclass."""
diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi
index e94861a3eba7..a68df2ea53c3 100644
--- a/numpy/typing/tests/data/fail/arithmetic.pyi
+++ b/numpy/typing/tests/data/fail/arithmetic.pyi
@@ -30,7 +30,7 @@ AR_LIKE_M: list[np.datetime64]
 # NOTE: mypys `NoReturn` errors are, unfortunately, not that great
 _1 = AR_b - AR_LIKE_b  # type: ignore[var-annotated]
 _2 = AR_LIKE_b - AR_b  # type: ignore[var-annotated]
-AR_i - bytes()  # type: ignore[operator]
+AR_i - b""  # type: ignore[operator]

 AR_f - AR_LIKE_m  # type: ignore[operator]
 AR_f - AR_LIKE_M  # type: ignore[operator]
@@ -85,7 +85,6 @@
 AR_b *= AR_LIKE_f  # type: ignore[arg-type]
 AR_b *= AR_LIKE_c  # type: ignore[arg-type]
 AR_b *= AR_LIKE_m  # type: ignore[arg-type]

-AR_u *= AR_LIKE_i  # type: ignore[arg-type]
 AR_u *= AR_LIKE_f  # type: ignore[arg-type]
 AR_u *= AR_LIKE_c  # type: ignore[arg-type]
 AR_u *= AR_LIKE_m  # type: ignore[arg-type]
@@ -105,7 +104,6 @@
 AR_b **= AR_LIKE_i  # type: ignore[misc]
 AR_b **= AR_LIKE_f  # type: ignore[misc]
 AR_b **= AR_LIKE_c  # type: ignore[misc]

-AR_u **= AR_LIKE_i  # type: ignore[arg-type]
 AR_u **= AR_LIKE_f  # type: ignore[arg-type]
 AR_u **= AR_LIKE_c  # type: ignore[arg-type]
@@ -116,7 +114,7 @@

 # Scalars
-b_ - b_  # type: ignore[call-overload]
+b_ - b_  # type: ignore[operator]

 dt + dt  # type: ignore[operator]
 td - dt  # type: ignore[operator]
diff --git a/numpy/typing/tests/data/fail/array_constructors.pyi b/numpy/typing/tests/data/fail/array_constructors.pyi
index cadc2ae595e7..6ed619958c1c 100644
--- a/numpy/typing/tests/data/fail/array_constructors.pyi
+++ b/numpy/typing/tests/data/fail/array_constructors.pyi
@@ -15,17 +15,17 @@
 np.ones()  # type: ignore[call-overload]

 np.array(0, float, True)  # type: ignore[call-overload]

-np.linspace(None, 'bob')  # type: ignore[call-overload]
+np.linspace(None, "bob")  # type: ignore[call-overload]
 np.linspace(0, 2, num=10.0)  # type: ignore[call-overload]
-np.linspace(0, 2, endpoint='True')  # type: ignore[call-overload]
-np.linspace(0, 2, retstep=b'False')  # type: ignore[call-overload]
+np.linspace(0, 2, endpoint="True")  # type: ignore[call-overload]
+np.linspace(0, 2, retstep=b"False")  # type: ignore[call-overload]
 np.linspace(0, 2, dtype=0)  # type: ignore[call-overload]
 np.linspace(0, 2, axis=None)  # type: ignore[call-overload]

-np.logspace(None, 'bob')  # type: ignore[call-overload]
+np.logspace(None, "bob")  # type: ignore[call-overload]
 np.logspace(0, 2, base=None)  # type: ignore[call-overload]

-np.geomspace(None, 'bob')  # type: ignore[call-overload]
+np.geomspace(None, "bob")  # type: ignore[call-overload]

 np.stack(generator)  # type: ignore[call-overload]
 np.hstack({1, 2})  # type: ignore[call-overload]
diff --git a/numpy/typing/tests/data/fail/arrayprint.pyi b/numpy/typing/tests/data/fail/arrayprint.pyi
index 224a4105b8a6..3c9dc9330a2b 100644
--- a/numpy/typing/tests/data/fail/arrayprint.pyi
+++ b/numpy/typing/tests/data/fail/arrayprint.pyi
@@ -8,9 +8,8 @@ AR: npt.NDArray[np.float64]
 func1: Callable[[Any], str]
 func2: Callable[[np.integer], str]

-np.array2string(AR, style=None)  # type: ignore[call-overload]
-np.array2string(AR, legacy="1.14")  # type: ignore[call-overload]
-np.array2string(AR, sign="*")  # type: ignore[call-overload]
-np.array2string(AR, floatmode="default")  # type: ignore[call-overload]
-np.array2string(AR, formatter={"A": func1})  # type: ignore[call-overload]
-np.array2string(AR, formatter={"float": func2})  # type: ignore[call-overload]
+np.array2string(AR, legacy="1.14")  # type: ignore[arg-type]
+np.array2string(AR, sign="*")  # type: ignore[arg-type]
+np.array2string(AR, floatmode="default")  # type: ignore[arg-type]
+np.array2string(AR, formatter={"A": func1})  # type: ignore[arg-type]
+np.array2string(AR, formatter={"float": func2})  # type: ignore[arg-type]
diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi
index 3538ec7d64c7..f4de2928ff54 100644
--- a/numpy/typing/tests/data/fail/bitwise_ops.pyi
+++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi
@@ -4,14 +4,14 @@
 i8 = np.int64()
 i4 = np.int32()
 u8 = np.uint64()
 b_ = np.bool()
-i = int()
+i = 0

 f8 = np.float64()

-b_ >> f8  # type: ignore[call-overload]
-i8 << f8  # type: ignore[call-overload]
+b_ >> f8  # type: ignore[operator]
+i8 << f8  # type: ignore[operator]
 i | f8  # type: ignore[operator]
-i8 ^ f8  # type: ignore[call-overload]
-u8 & f8  # type: ignore[call-overload]
+i8 ^ f8  # type: ignore[operator]
+u8 & f8  # type: ignore[operator]
 ~f8  # type: ignore[operator]

 # TODO: Certain mixes like i4 << u8 go to float and thus should fail
diff --git a/numpy/typing/tests/data/fail/char.pyi b/numpy/typing/tests/data/fail/char.pyi
index 62c4475c29be..3dbe5eda296e 100644
--- a/numpy/typing/tests/data/fail/char.pyi
+++ b/numpy/typing/tests/data/fail/char.pyi
@@ -19,9 +19,7 @@
 np.char.join(AR_U, b"_")  # type: ignore[arg-type]
 np.char.join(AR_S, "_")  # type: ignore[arg-type]

 np.char.ljust(AR_U, 5, fillchar=b"a")  # type: ignore[arg-type]
-np.char.ljust(AR_S, 5, fillchar="a")  # type: ignore[arg-type]
 np.char.rjust(AR_U, 5, fillchar=b"a")  # type: ignore[arg-type]
-np.char.rjust(AR_S, 5, fillchar="a")  # type: ignore[arg-type]

 np.char.lstrip(AR_U, chars=b"a")  # type: ignore[arg-type]
 np.char.lstrip(AR_S, chars="a")  # type: ignore[arg-type]
diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi
index fb52f7349dd1..589895510227 100644
--- a/numpy/typing/tests/data/fail/chararray.pyi
+++ b/numpy/typing/tests/data/fail/chararray.pyi
@@ -1,4 +1,5 @@
 from typing import Any
+
 import numpy as np

 AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]
@@ -11,9 +12,7 @@
 AR_U.join(b"_")  # type: ignore[arg-type]
 AR_S.join("_")  # type: ignore[arg-type]

 AR_U.ljust(5, fillchar=b"a")  # type: ignore[arg-type]
-AR_S.ljust(5, fillchar="a")  # type: ignore[arg-type]
 AR_U.rjust(5, fillchar=b"a")  # type: ignore[arg-type]
-AR_S.rjust(5, fillchar="a")  # type: ignore[arg-type]

 AR_U.lstrip(chars=b"a")  # type: ignore[arg-type]
 AR_S.lstrip(chars="a")  # type: ignore[arg-type]
diff --git a/numpy/typing/tests/data/fail/comparisons.pyi b/numpy/typing/tests/data/fail/comparisons.pyi
index 3c8a94bff240..d2965b5c1a91 100644
--- a/numpy/typing/tests/data/fail/comparisons.pyi
+++ b/numpy/typing/tests/data/fail/comparisons.pyi
@@ -21,7 +21,7 @@ AR_M > AR_i  # type: ignore[operator]
 AR_M > AR_f  # type: ignore[operator]
 AR_M > AR_m  # type: ignore[operator]

-AR_i > str()  # type: ignore[operator]
-AR_i > bytes()  # type: ignore[operator]
-str() > AR_M  # type: ignore[operator]
-bytes() > AR_M  # type: ignore[operator]
+AR_i > ""  # type: ignore[operator]
+AR_i > b""  # type: ignore[operator]
+"" > AR_M  # type: ignore[operator]
+b"" > AR_M  # type: ignore[operator]
diff --git a/numpy/typing/tests/data/fail/datasource.pyi b/numpy/typing/tests/data/fail/datasource.pyi
index 267b672baea7..4c603cf693a1 100644
--- a/numpy/typing/tests/data/fail/datasource.pyi
+++ b/numpy/typing/tests/data/fail/datasource.pyi
@@ -1,4 +1,5 @@
 from pathlib import Path
+
 import numpy as np

 path: Path
diff --git a/numpy/typing/tests/data/fail/flatiter.pyi b/numpy/typing/tests/data/fail/flatiter.pyi
index 06e23fed9e3f..2c6e912bd318 100644
--- a/numpy/typing/tests/data/fail/flatiter.pyi
+++ b/numpy/typing/tests/data/fail/flatiter.pyi
@@ -1,20 +1,38 @@
+from typing import Any
+
 import numpy as np
-import numpy._typing as npt
+import numpy.typing as npt

-class Index:
+class _Index:
     def __index__(self) -> int: ...

-a: np.flatiter[npt.NDArray[np.float64]]
-supports_array: npt._SupportsArray[np.dtype[np.float64]]
+class _MyArray:
+    def __array__(self) -> np.ndarray[tuple[int], np.dtypes.Float64DType]: ...
+
+_index: _Index
+_my_array: _MyArray
+_something: Any
+_dtype: np.dtype[np.int8]
+
+_a_nd: np.flatiter[npt.NDArray[np.float64]]

-a.base = object()  # type: ignore[assignment, misc]
-a.coords = object()  # type: ignore[assignment, misc]
-a.index = object()  # type: ignore[assignment, misc]
-a.copy(order='C')  # type: ignore[call-arg]
+
+###
+
+_a_nd.base = _something  # type: ignore[misc]
+_a_nd.coords = _something  # type: ignore[misc]
+_a_nd.index = _something  # type: ignore[misc]
+
+_a_nd.copy("C")  # type: ignore[call-arg]
+_a_nd.copy(order="C")  # type: ignore[call-arg]

 # NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter`
 # does not accept objects with the `__array__` or `__index__` protocols;
 # boolean indexing is just plain broken (gh-17175)
-a[np.bool()]  # type: ignore[index]
-a[Index()]  # type: ignore[call-overload]
-a[supports_array]  # type: ignore[index]
+_a_nd[np.True_]  # type: ignore[call-overload]
+_a_nd[_index]  # type: ignore[call-overload]
+_a_nd[_my_array]  # type: ignore[call-overload]
+
+# `dtype` and `copy` are no-ops in `flatiter.__array__`
+_a_nd.__array__(_dtype)  # type: ignore[arg-type]
+_a_nd.__array__(dtype=_dtype)  # type: ignore[call-arg]
+_a_nd.__array__(copy=True)  # type: ignore[arg-type]
diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi
index 51ef26810e21..92b0cb366207 100644
--- a/numpy/typing/tests/data/fail/fromnumeric.pyi
+++ b/numpy/typing/tests/data/fail/fromnumeric.pyi
@@ -32,18 +32,18 @@
 np.swapaxes(A, 1, [0])  # type: ignore[call-overload]

 np.transpose(A, axes=1.0)  # type: ignore[call-overload]

 np.partition(a, None)  # type: ignore[call-overload]
-np.partition(a, 0, axis="bob")      # type: ignore[call-overload]
+np.partition(a, 0, axis="bob")  # type: ignore[call-overload]
 np.partition(A, 0, kind="bob")  # type: ignore[call-overload]
 np.partition(A, 0, order=range(5))  # type: ignore[arg-type]

 np.argpartition(a, None)  # type: ignore[arg-type]
 np.argpartition(a, 0, axis="bob")  # type: ignore[arg-type]
-np.argpartition(A, 0, kind="bob")      # type: ignore[arg-type]
+np.argpartition(A, 0, kind="bob")  # type: ignore[arg-type]
 np.argpartition(A, 0, order=range(5))  # type: ignore[arg-type]

 np.sort(A, axis="bob")  # type: ignore[call-overload]
 np.sort(A, kind="bob")  # type: ignore[call-overload]
-np.sort(A, order=range(5))      # type: ignore[arg-type]
+np.sort(A, order=range(5))  # type: ignore[arg-type]

 np.argsort(A, axis="bob")  # type: ignore[arg-type]
 np.argsort(A, kind="bob")  # type: ignore[arg-type]
@@ -137,12 +137,12 @@
 np.mean(AR_M)  # type: ignore[arg-type]

 np.std(a, axis=1.0)  # type: ignore[call-overload]
 np.std(a, out=False)  # type: ignore[call-overload]
-np.std(a, ddof='test')  # type: ignore[call-overload]
+np.std(a, ddof="test")  # type: ignore[call-overload]
 np.std(a, keepdims=1.0)  # type: ignore[call-overload]
 np.std(AR_U)  # type: ignore[arg-type]

 np.var(a, axis=1.0)  # type: ignore[call-overload]
 np.var(a, out=False)  # type: ignore[call-overload]
-np.var(a, ddof='test')  # type: ignore[call-overload]
+np.var(a, ddof="test")  # type: ignore[call-overload]
 np.var(a, keepdims=1.0)  # type: ignore[call-overload]
 np.var(AR_U)  # type: ignore[arg-type]
diff --git a/numpy/typing/tests/data/fail/lib_function_base.pyi b/numpy/typing/tests/data/fail/lib_function_base.pyi
index f0bf6347691d..d7be993bf04c 100644
--- a/numpy/typing/tests/data/fail/lib_function_base.pyi
+++ b/numpy/typing/tests/data/fail/lib_function_base.pyi
@@ -13,34 +13,32 @@
 AR_b_list: list[npt.NDArray[np.bool]]

 def fn_none_i(a: None, /) -> npt.NDArray[Any]: ...
 def fn_ar_i(a: npt.NDArray[np.float64], posarg: int, /) -> npt.NDArray[Any]: ...

-np.average(AR_m)  # type: ignore[arg-type]
-np.select(1, [AR_f8])  # type: ignore[arg-type]
-np.angle(AR_m)  # type: ignore[arg-type]
-np.unwrap(AR_m)  # type: ignore[arg-type]
-np.unwrap(AR_c16)  # type: ignore[arg-type]
+np.average(AR_m)  # type: ignore[type-var]
+np.select(1, [AR_f8])  # type: ignore[call-overload]
+np.angle(AR_m)  # type: ignore[type-var]
+np.unwrap(AR_m)  # type: ignore[type-var]
+np.unwrap(AR_c16)  # type: ignore[type-var]
 np.trim_zeros(1)  # type: ignore[arg-type]
 np.place(1, [True], 1.5)  # type: ignore[arg-type]
 np.vectorize(1)  # type: ignore[arg-type]
 np.place(AR_f8, slice(None), 5)  # type: ignore[arg-type]

-np.piecewise(AR_f8, True, [fn_ar_i], 42)  # type: ignore[call-overload]
-# TODO: enable these once mypy actually supports ParamSpec (released in 2021)
-# NOTE: pyright correctly reports errors for these (`reportCallIssue`)
-# np.piecewise(AR_f8, AR_b_list, [fn_none_i])  # type: ignore[call-overload]s
-# np.piecewise(AR_f8, AR_b_list, [fn_ar_i])  # type: ignore[call-overload]
-# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14)  # type: ignore[call-overload]
-# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None)  # type: ignore[call-overload]
-# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None)  # type: ignore[call-overload]
+np.piecewise(AR_f8, True, [fn_ar_i], "wrong")  # type: ignore[call-overload]
+np.piecewise(AR_f8, AR_b_list, [fn_none_i])  # type: ignore[call-overload]
+np.piecewise(AR_f8, AR_b_list, [fn_ar_i])  # type: ignore[call-overload]
+np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14)  # type: ignore[call-overload]
+np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None)  # type: ignore[call-overload]
+np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None)  # type: ignore[list-item]

 np.interp(AR_f8, AR_c16, AR_f8)  # type: ignore[arg-type]
 np.interp(AR_c16, AR_f8, AR_f8)  # type: ignore[arg-type]
 np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16)  # type: ignore[call-overload]
 np.interp(AR_f8, AR_f8, AR_O)  # type: ignore[arg-type]

-np.cov(AR_m)  # type: ignore[arg-type]
-np.cov(AR_O)  # type: ignore[arg-type]
-np.corrcoef(AR_m)  # type: ignore[arg-type]
-np.corrcoef(AR_O)  # type: ignore[arg-type]
+np.cov(AR_m)  # type: ignore[type-var]
+np.cov(AR_O)  # type: ignore[type-var]
+np.corrcoef(AR_m)  # type: ignore[type-var]
+np.corrcoef(AR_O)  # type: ignore[type-var]
 np.corrcoef(AR_f8, bias=True)  # type: ignore[call-overload]
 np.corrcoef(AR_f8, ddof=2)  # type: ignore[call-overload]
 np.blackman(1j)  # type: ignore[arg-type]
@@ -49,8 +47,8 @@
 np.hanning(1j)  # type: ignore[arg-type]
 np.hamming(1j)  # type: ignore[arg-type]
 np.hamming(AR_c16)  # type: ignore[arg-type]
 np.kaiser(1j, 1)  # type: ignore[arg-type]
-np.sinc(AR_O)  # type: ignore[arg-type]
-np.median(AR_M)  # type: ignore[arg-type]
+np.sinc(AR_O)  # type: ignore[type-var]
+np.median(AR_M)  # type: ignore[type-var]

 np.percentile(AR_f8, 50j)  # type: ignore[call-overload]
 np.percentile(AR_f8, 50, interpolation="bob")  # type: ignore[call-overload]
diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi
index c4695ee671cd..eda82c48c85a 100644
--- a/numpy/typing/tests/data/fail/linalg.pyi
+++ b/numpy/typing/tests/data/fail/linalg.pyi
@@ -9,38 +9,38 @@
 np.linalg.tensorsolve(AR_O, AR_O)  # type: ignore[arg-type]

 np.linalg.solve(AR_O, AR_O)  # type: ignore[arg-type]

-np.linalg.tensorinv(AR_O)  # type: ignore[arg-type]
-
-np.linalg.inv(AR_O)  # type: ignore[arg-type]
+np.linalg.tensorinv(AR_O)  # type: ignore[type-var]
+np.linalg.inv(AR_O)  # type: ignore[type-var]
+np.linalg.pinv(AR_O)  # type: ignore[type-var]
+np.linalg.cholesky(AR_O)  # type: ignore[type-var]

 np.linalg.matrix_power(AR_M, 5)  # type: ignore[arg-type]

-np.linalg.cholesky(AR_O)  # type: ignore[arg-type]
+np.linalg.eig(AR_O)  # type: ignore[arg-type]
+
+np.linalg.eigh(AR_O)  # type: ignore[arg-type]
+np.linalg.eigh(AR_O, UPLO="bob")  # type: ignore[call-overload]

-np.linalg.qr(AR_O)  # type: ignore[arg-type]
+np.linalg.qr(AR_O)  # type: ignore[type-var]
 np.linalg.qr(AR_f8, mode="bob")  # type: ignore[call-overload]

+np.linalg.svd(AR_O)  # type: ignore[arg-type]
+
 np.linalg.eigvals(AR_O)  # type: ignore[arg-type]

 np.linalg.eigvalsh(AR_O)  # type: ignore[arg-type]
 np.linalg.eigvalsh(AR_O, UPLO="bob")  # type: ignore[call-overload]

-np.linalg.eig(AR_O)  # type: ignore[arg-type]
-
-np.linalg.eigh(AR_O)  # type: ignore[arg-type]
-np.linalg.eigh(AR_O, UPLO="bob")  # type: ignore[call-overload]
-
-np.linalg.svd(AR_O)  # type: ignore[arg-type]
-
-np.linalg.cond(AR_O)  # type: ignore[arg-type]
-np.linalg.cond(AR_f8, p="bob")  # type: ignore[arg-type]
+np.linalg.svdvals(AR_O)  # type: ignore[arg-type]
+np.linalg.svdvals(AR_M)  # type: ignore[arg-type]
+np.linalg.svdvals(x=AR_f8)  # type: ignore[call-overload]

 np.linalg.matrix_rank(AR_O)  # type: ignore[arg-type]

-np.linalg.pinv(AR_O)  # type: ignore[arg-type]
+np.linalg.cond(AR_O)  # type: ignore[arg-type]
+np.linalg.cond(AR_f8, p="bob")  # type: ignore[call-overload]

 np.linalg.slogdet(AR_O)  # type: ignore[arg-type]
-
 np.linalg.det(AR_O)  # type: ignore[arg-type]

 np.linalg.norm(AR_f8, ord="bob")  # type: ignore[call-overload]
diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi
index 41306b23fe78..06159f6e979e 100644
--- a/numpy/typing/tests/data/fail/ma.pyi
+++ b/numpy/typing/tests/data/fail/ma.pyi
@@ -1,22 +1,24 @@
-from typing import TypeAlias, TypeVar
-
 import numpy as np
 import numpy.typing as npt
-from numpy._typing import _Shape
+from numpy._typing import _AnyShape

-_ScalarT = TypeVar("_ScalarT", bound=np.generic)
-MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]]
+type _MArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]]

 MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]
-MAR_b: MaskedArray[np.bool]
-MAR_c: MaskedArray[np.complex128]
-MAR_td64: MaskedArray[np.timedelta64]
+MAR_b: _MArray[np.bool]
+MAR_c: _MArray[np.complex128]
+MAR_td64: _MArray[np.timedelta64]
 AR_b: npt.NDArray[np.bool]

 MAR_1d_f8.shape = (3, 1)  # type: ignore[assignment]
 MAR_1d_f8.dtype = np.bool  # type: ignore[assignment]

+def invalid_recordmask_setter() -> None:
+    # We make an inner function for this one to avoid the
+    # `NoReturn` causing an early exit for type checkers.
+    MAR_1d_f8.recordmask = [True]  # type: ignore[assignment]
+
 np.ma.min(MAR_1d_f8, axis=1.0)  # type: ignore[call-overload]
 np.ma.min(MAR_1d_f8, keepdims=1.0)  # type: ignore[call-overload]
 np.ma.min(MAR_1d_f8, out=1.0)  # type: ignore[call-overload]
@@ -77,13 +79,13 @@
 MAR_1d_f8.any(axis=1.0)  # type: ignore[call-overload]
 MAR_1d_f8.any(keepdims=1.0)  # type: ignore[call-overload]
 MAR_1d_f8.any(out=1.0)  # type: ignore[call-overload]

-MAR_1d_f8.sort(axis=(0,1))  # type: ignore[arg-type]
+MAR_1d_f8.sort(axis=(0, 1))  # type: ignore[arg-type]
 MAR_1d_f8.sort(axis=None)  # type: ignore[arg-type]
-MAR_1d_f8.sort(kind='cabbage')  # type: ignore[arg-type]
-MAR_1d_f8.sort(order=lambda: 'cabbage')  # type: ignore[arg-type]
-MAR_1d_f8.sort(endwith='cabbage')  # type: ignore[arg-type]
-MAR_1d_f8.sort(fill_value=lambda: 'cabbage')  # type: ignore[arg-type]
-MAR_1d_f8.sort(stable='cabbage')  # type: ignore[arg-type]
+MAR_1d_f8.sort(kind="cabbage")  # type: ignore[arg-type]
+MAR_1d_f8.sort(order=lambda: "cabbage")  # type: ignore[arg-type]
+MAR_1d_f8.sort(endwith="cabbage")  # type: ignore[arg-type]
+MAR_1d_f8.sort(fill_value=lambda: "cabbage")  # type: ignore[arg-type]
+MAR_1d_f8.sort(stable="cabbage")  # type: ignore[arg-type]
 MAR_1d_f8.sort(stable=True)  # type: ignore[arg-type]

 MAR_1d_f8.take(axis=1.0)  # type: ignore[call-overload]
@@ -95,49 +97,56 @@
 np.ma.take(axis=1.0)  # type: ignore[call-overload]
 np.ma.take(out=1)  # type: ignore[call-overload]
 np.ma.take(mode="bob")  # type: ignore[call-overload]

-MAR_1d_f8.partition(['cabbage'])  # type: ignore[arg-type]
-MAR_1d_f8.partition(axis=(0,1))  # type: ignore[arg-type, call-arg]
-MAR_1d_f8.partition(kind='cabbage')  # type: ignore[arg-type, call-arg]
-MAR_1d_f8.partition(order=lambda: 'cabbage')  # type: ignore[arg-type, call-arg]
+MAR_1d_f8.partition(["cabbage"])  # type: ignore[arg-type]
+MAR_1d_f8.partition(axis=(0, 1))  # type: ignore[arg-type, call-arg]
+MAR_1d_f8.partition(kind="cabbage")  # type: ignore[arg-type, call-arg]
+MAR_1d_f8.partition(order=lambda: "cabbage")  # type: ignore[arg-type, call-arg]
 MAR_1d_f8.partition(AR_b)  # type: ignore[arg-type]

-MAR_1d_f8.argpartition(['cabbage'])  # type: ignore[arg-type]
-MAR_1d_f8.argpartition(axis=(0,1))  # type: ignore[arg-type, call-arg]
-MAR_1d_f8.argpartition(kind='cabbage')  # type: ignore[arg-type, call-arg]
-MAR_1d_f8.argpartition(order=lambda: 'cabbage')  # type: ignore[arg-type, call-arg]
+MAR_1d_f8.argpartition(["cabbage"])  # type: ignore[arg-type]
+MAR_1d_f8.argpartition(axis=(0, 1))  # type: ignore[arg-type, call-arg]
+MAR_1d_f8.argpartition(kind="cabbage")  # type: ignore[arg-type, call-arg]
+MAR_1d_f8.argpartition(order=lambda: "cabbage")  # type: ignore[arg-type, call-arg]
 MAR_1d_f8.argpartition(AR_b)  # type: ignore[arg-type]

-np.ma.ndim(lambda: 'lambda')  # type: ignore[arg-type]
+np.ma.ndim(lambda: "lambda")  # type: ignore[arg-type]

-np.ma.size(AR_b, axis='0')  # type: ignore[arg-type]
+np.ma.size(AR_b, axis="0")  # type: ignore[arg-type]

-MAR_1d_f8 >= (lambda x: 'mango')  # type: ignore[operator]
-MAR_1d_f8 > (lambda x: 'mango')  # type: ignore[operator]
-MAR_1d_f8 <= (lambda x: 'mango')  # type: ignore[operator]
-MAR_1d_f8 < (lambda x: 'mango')  # type: ignore[operator]
+MAR_1d_f8 >= (lambda x: "mango")  # type: ignore[operator]
+MAR_1d_f8 > (lambda x: "mango")  # type: ignore[operator]
+MAR_1d_f8 <= (lambda x: "mango")  # type: ignore[operator]
+MAR_1d_f8 < (lambda x: "mango")  # type: ignore[operator]

 MAR_1d_f8.count(axis=0.)  # type: ignore[call-overload]

 np.ma.count(MAR_1d_f8, axis=0.)  # type: ignore[call-overload]

-MAR_1d_f8.put(4, 999, mode='flip')  # type: ignore[arg-type]
+MAR_1d_f8.put(4, 999, mode="flip")  # type: ignore[arg-type]

-np.ma.put(MAR_1d_f8, 4, 999, mode='flip')  # type: ignore[arg-type]
+np.ma.put(MAR_1d_f8, 4, 999, mode="flip")  # type: ignore[arg-type]

-np.ma.put([1,1,3], 0, 999)  # type: ignore[arg-type]
+np.ma.put([1, 1, 3], 0, 999)  # type: ignore[arg-type]

-np.ma.compressed(lambda: 'compress me')  # type: ignore[call-overload]
+np.ma.compressed(lambda: "compress me")  # type: ignore[call-overload]

-np.ma.allequal(MAR_1d_f8, [1,2,3], fill_value=1.5)  # type: ignore[arg-type]
+np.ma.allequal(MAR_1d_f8, [1, 2, 3], fill_value=1.5)  # type: ignore[arg-type]

-np.ma.allclose(MAR_1d_f8, [1,2,3], masked_equal=4.5)  # type: ignore[arg-type]
-np.ma.allclose(MAR_1d_f8, [1,2,3], rtol='.4')  # type: ignore[arg-type]
-np.ma.allclose(MAR_1d_f8, [1,2,3], atol='.5')  # type: ignore[arg-type]
+np.ma.allclose(MAR_1d_f8, [1, 2, 3], masked_equal=4.5)  # type: ignore[arg-type]
+np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol=".4")  # type: ignore[arg-type]
+np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol=".5")  # type: ignore[arg-type]

-MAR_1d_f8.__setmask__('mask')  # type: ignore[arg-type]
+MAR_1d_f8.__setmask__("mask")  # type: ignore[arg-type]

 MAR_b *= 2  # type: ignore[arg-type]
 MAR_c //= 2  # type: ignore[misc]
 MAR_td64 **= 2  # type: ignore[misc]

 MAR_1d_f8.swapaxes(axis1=1, axis2=0)  # type: ignore[call-arg]
+
+MAR_1d_f8.argsort(axis=(1, 0))  # type: ignore[arg-type]
+
+np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask="yes")  # type: ignore[call-overload]
+np.ma.MaskedArray(np.array([1, 2, 3]), subok=None)  # type: ignore[call-overload]
+np.ma.MaskedArray(np.array([1, 2, 3]), ndim=None)  # type: ignore[call-overload]
+np.ma.MaskedArray(np.array([1, 2, 3]), order="Corinthian")  # type: ignore[call-overload]
diff --git a/numpy/typing/tests/data/fail/multiarray.pyi b/numpy/typing/tests/data/fail/multiarray.pyi
index 1f9ef6894bad..51128dfbf6f7 100644
--- a/numpy/typing/tests/data/fail/multiarray.pyi
+++ b/numpy/typing/tests/data/fail/multiarray.pyi
@@ -26,10 +26,10 @@
 np.copyto(AR_LIKE_f, AR_f8)  # type: ignore[arg-type]

 np.putmask(AR_LIKE_f, [True, True, False], 1.5)  # type: ignore[arg-type]

 np.packbits(AR_f8)  # type: ignore[arg-type]
-np.packbits(AR_u1, bitorder=">")  # type: ignore[arg-type]
+np.packbits(AR_u1, bitorder=">")  # type: ignore[call-overload]

 np.unpackbits(AR_i8)  # type: ignore[arg-type]
-np.unpackbits(AR_u1, bitorder=">")  # type: ignore[arg-type]
+np.unpackbits(AR_u1, bitorder=">")  # type: ignore[call-overload]

 np.shares_memory(1, 1, max_work=i8)  # type: ignore[arg-type]
 np.may_share_memory(1, 1, max_work=i8)  # type: ignore[arg-type]
diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi
index 93e1bce8fecb..29418930061c 100644
--- a/numpy/typing/tests/data/fail/ndarray_misc.pyi
+++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi
@@ -5,6 +5,7 @@
 More extensive tests are performed for the methods'
 function-based counterpart in `../from_numeric.py`.
""" +from typing import Never import numpy as np import numpy.typing as npt @@ -17,14 +18,26 @@ AR_b: npt.NDArray[np.bool] ctypes_obj = AR_f8.ctypes f8.argpartition(0) # type: ignore[attr-defined] -f8.diagonal() # type: ignore[attr-defined] -f8.dot(1) # type: ignore[attr-defined] -f8.nonzero() # type: ignore[attr-defined] f8.partition(0) # type: ignore[attr-defined] -f8.put(0, 2) # type: ignore[attr-defined] -f8.setfield(2, np.float64) # type: ignore[attr-defined] -f8.sort() # type: ignore[attr-defined] -f8.trace() # type: ignore[attr-defined] +f8.dot(1) # type: ignore[attr-defined] + +# NOTE: The following functions retur `Never`, causing mypy to stop analysis at that +# point, which we circumvent by wrapping them in a function. + +def f8_diagonal(x: np.float64) -> Never: + return x.diagonal() # type: ignore[misc] + +def f8_nonzero(x: np.float64) -> Never: + return x.nonzero() # type: ignore[misc] + +def f8_setfield(x: np.float64) -> Never: + return x.setfield(2, np.float64) # type: ignore[misc] + +def f8_sort(x: np.float64) -> Never: + return x.sort() # type: ignore[misc] + +def f8_trace(x: np.float64) -> Never: + return x.trace() # type: ignore[misc] AR_M.__complex__() # type: ignore[misc] AR_b.__index__() # type: ignore[misc] diff --git a/numpy/typing/tests/data/fail/nditer.pyi b/numpy/typing/tests/data/fail/nditer.pyi index cb64061e45fe..fae728da454e 100644 --- a/numpy/typing/tests/data/fail/nditer.pyi +++ b/numpy/typing/tests/data/fail/nditer.pyi @@ -5,4 +5,4 @@ class Test(np.nditer): ... # type: ignore[misc] np.nditer([0, 1], flags=["test"]) # type: ignore[list-item] np.nditer([0, 1], op_flags=[["test"]]) # type: ignore[list-item] np.nditer([0, 1], itershape=(1.0,)) # type: ignore[arg-type] -np.nditer([0, 1], buffersize=1.0) # type: ignore[arg-type] +np.nditer([0, 1], buffersize=1.0) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/nested_sequence.pyi b/numpy/typing/tests/data/fail/nested_sequence.pyi index a28d3df3c749..1004a36accc7 100644 --- a/numpy/typing/tests/data/fail/nested_sequence.pyi +++ b/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -1,4 +1,5 @@ from collections.abc import Sequence + from numpy._typing import _NestedSequence a: Sequence[float] diff --git a/numpy/typing/tests/data/fail/npyio.pyi b/numpy/typing/tests/data/fail/npyio.pyi index e204566a5877..e20be3a2a247 100644 --- a/numpy/typing/tests/data/fail/npyio.pyi +++ b/numpy/typing/tests/data/fail/npyio.pyi @@ -1,8 +1,8 @@ import pathlib from typing import IO -import numpy.typing as npt import numpy as np +import numpy.typing as npt str_path: str bytes_path: bytes @@ -12,8 +12,8 @@ AR_i8: npt.NDArray[np.int64] np.load(str_file) # type: ignore[arg-type] -np.save(bytes_path, AR_i8) # type: ignore[call-overload] -np.save(str_path, AR_i8, fix_imports=True) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +np.save(bytes_path, AR_i8) # type: ignore[arg-type] +np.save(str_path, AR_i8, fix_imports=True) # type: ignore[call-arg] np.savez(bytes_path, AR_i8) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index bfbe9125e529..018a88e652ae 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -1,4 +1,3 @@ -import sys import numpy as np f2: np.float16 @@ -58,15 +57,15 @@ np.float64(value=0.0) # type: ignore[call-arg] np.int64(value=0) # type: ignore[call-arg] np.uint64(value=0) # type: ignore[call-arg] np.complex128(value=0.0j) # type: ignore[call-overload] 
-np.str_(value='bob') # type: ignore[call-overload] -np.bytes_(value=b'test') # type: ignore[call-overload] -np.void(value=b'test') # type: ignore[call-overload] +np.str_(value="bob") # type: ignore[call-overload] +np.bytes_(value=b"test") # type: ignore[call-overload] +np.void(value=b"test") # type: ignore[call-overload] np.bool(value=True) # type: ignore[call-overload] np.datetime64(value="2019") # type: ignore[call-overload] np.timedelta64(value=0) # type: ignore[call-overload] -np.bytes_(b"hello", encoding='utf-8') # type: ignore[call-overload] -np.str_("hello", encoding='utf-8') # type: ignore[call-overload] +np.bytes_(b"hello", encoding="utf-8") # type: ignore[call-overload] +np.str_("hello", encoding="utf-8") # type: ignore[call-overload] f8.item(1) # type: ignore[call-overload] f8.item((0, 1)) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/shape.pyi b/numpy/typing/tests/data/fail/shape.pyi index fea055583073..a024d7bda273 100644 --- a/numpy/typing/tests/data/fail/shape.pyi +++ b/numpy/typing/tests/data/fail/shape.pyi @@ -1,4 +1,5 @@ from typing import Any + import numpy as np # test bounds of _ShapeT_co diff --git a/numpy/typing/tests/data/fail/twodim_base.pyi b/numpy/typing/tests/data/fail/twodim_base.pyi index d0f2b7ad8322..473419cecef1 100644 --- a/numpy/typing/tests/data/fail/twodim_base.pyi +++ b/numpy/typing/tests/data/fail/twodim_base.pyi @@ -1,32 +1,39 @@ -from typing import Any, TypeVar +from typing import type_check_only import numpy as np import numpy.typing as npt -def func1(ar: npt.NDArray[Any], a: int) -> npt.NDArray[np.str_]: ... +_0d_bool: np.bool +_nd_bool: npt.NDArray[np.bool] +_nd_td64: npt.NDArray[np.timedelta64] +_to_2d_bool: list[list[bool]] -def func2(ar: npt.NDArray[Any], a: float) -> float: ... +@type_check_only +def func1(ar: np.ndarray, a: int) -> npt.NDArray[np.str_]: ... +@type_check_only +def func2(ar: np.ndarray, a: float) -> float: ... 
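A short sketch of the `@type_check_only` idiom now applied to `func1`/`func2` above:
it marks definitions that exist only for the type checker, and `typing` deliberately
does not provide it at runtime, so it belongs in `.pyi` stubs like this one
(`mask_func` below is an illustrative name, not part of the patch).

    import numpy as np
    from typing import type_check_only

    @type_check_only
    def mask_func(ar: np.ndarray, k: int) -> np.ndarray: ...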
-AR_b: npt.NDArray[np.bool] -AR_m: npt.NDArray[np.timedelta64] - -AR_LIKE_b: list[bool] +### np.eye(10, M=20.0) # type: ignore[call-overload] np.eye(10, k=2.5, dtype=int) # type: ignore[call-overload] -np.diag(AR_b, k=0.5) # type: ignore[call-overload] -np.diagflat(AR_b, k=0.5) # type: ignore[call-overload] +np.diag(_nd_bool, k=0.5) # type: ignore[call-overload] +np.diagflat(_nd_bool, k=0.5) # type: ignore[call-overload] np.tri(10, M=20.0) # type: ignore[call-overload] np.tri(10, k=2.5, dtype=int) # type: ignore[call-overload] -np.tril(AR_b, k=0.5) # type: ignore[call-overload] -np.triu(AR_b, k=0.5) # type: ignore[call-overload] +np.tril(_nd_bool, k=0.5) # type: ignore[call-overload] +np.triu(_nd_bool, k=0.5) # type: ignore[call-overload] -np.vander(AR_m) # type: ignore[arg-type] +np.vander(_nd_td64) # type: ignore[type-var] -np.histogram2d(AR_m) # type: ignore[call-overload] +np.histogram2d(_nd_td64) # type: ignore[call-overload] np.mask_indices(10, func1) # type: ignore[arg-type] np.mask_indices(10, func2, 10.5) # type: ignore[arg-type] + +np.tril_indices(3.14) # type: ignore[arg-type] + +np.tril_indices_from(_to_2d_bool) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/type_check.pyi b/numpy/typing/tests/data/fail/type_check.pyi index 94b6ee425af5..8b68e996304c 100644 --- a/numpy/typing/tests/data/fail/type_check.pyi +++ b/numpy/typing/tests/data/fail/type_check.pyi @@ -1,5 +1,4 @@ import numpy as np -import numpy.typing as npt DTYPE_i8: np.dtype[np.int64] diff --git a/numpy/typing/tests/data/misc/extended_precision.pyi b/numpy/typing/tests/data/misc/extended_precision.pyi index 84b5f516bdde..7978faf4d5bd 100644 --- a/numpy/typing/tests/data/misc/extended_precision.pyi +++ b/numpy/typing/tests/data/misc/extended_precision.pyi @@ -1,8 +1,8 @@ +from typing import assert_type + import numpy as np from numpy._typing import _96Bit, _128Bit -from typing import assert_type - assert_type(np.float96(), np.floating[_96Bit]) assert_type(np.float128(), np.floating[_128Bit]) assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index bca203260efa..4aa465ae087b 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -1,9 +1,8 @@ [mypy] +strict = True enable_error_code = deprecated, ignore-without-code, truthy-bool -strict_bytes = True -warn_unused_ignores = True -implicit_reexport = False disallow_any_unimported = True -disallow_any_generics = True +allow_redefinition_new = True +local_partial_types = True show_absolute_path = True pretty = True diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index b50d28e5fca5..e347ec096e21 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,9 +1,11 @@ from __future__ import annotations -from typing import Any +from typing import Any, cast + +import pytest + import numpy as np import numpy.typing as npt -import pytest c16 = np.complex128(1) f8 = np.float64(1) @@ -23,11 +25,11 @@ b = bool(1) c = complex(1) f = float(1) -i = int(1) +i = 1 class Object: - def __array__(self, dtype: np.typing.DTypeLike = None, + def __array__(self, dtype: np.typing.DTypeLike | None = None, copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self @@ -61,6 +63,7 @@ def __rpow__(self, value: Any) -> Object: AR_b: npt.NDArray[np.bool] = np.array([True]) AR_u: npt.NDArray[np.uint32] = 
np.array([1], dtype=np.uint32) AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i) AR_f: npt.NDArray[np.float64] = np.array([1.0]) AR_c: npt.NDArray[np.complex128] = np.array([1j]) AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) @@ -282,6 +285,10 @@ def __rpow__(self, value: Any) -> Object: AR_i *= AR_LIKE_u AR_i *= AR_LIKE_i +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + AR_f *= AR_LIKE_b AR_f *= AR_LIKE_u AR_f *= AR_LIKE_i @@ -314,6 +321,10 @@ def __rpow__(self, value: Any) -> Object: AR_i **= AR_LIKE_u AR_i **= AR_LIKE_i +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + AR_f **= AR_LIKE_b AR_f **= AR_LIKE_u AR_f **= AR_LIKE_i diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index 17b6fab93ad8..27cbffa06a5c 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -1,14 +1,14 @@ from typing import Any import numpy as np -import numpy.typing as npt + class Index: def __index__(self) -> int: return 0 -class SubClass(npt.NDArray[np.float64]): +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): pass diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index 264ec55da053..f1e09b03a4ec 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -1,11 +1,5 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - import numpy as np - -if TYPE_CHECKING: - from numpy._typing import NDArray, ArrayLike, _SupportsArray +from numpy._typing import ArrayLike, NDArray, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 diff --git a/numpy/typing/tests/data/pass/arrayterator.py b/numpy/typing/tests/data/pass/arrayterator.py index 572be5e2fe29..a99c09a25231 100644 --- a/numpy/typing/tests/data/pass/arrayterator.py +++ b/numpy/typing/tests/data/pass/arrayterator.py @@ -2,6 +2,7 @@ from __future__ import annotations from typing import Any + import numpy as np AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) diff --git a/numpy/typing/tests/data/pass/bitwise_ops.py b/numpy/typing/tests/data/pass/bitwise_ops.py index 22a245d21809..2d4815b0d940 100644 --- a/numpy/typing/tests/data/pass/bitwise_ops.py +++ b/numpy/typing/tests/data/pass/bitwise_ops.py @@ -9,7 +9,7 @@ b_ = np.bool(1) b = bool(1) -i = int(1) +i = 1 AR = np.array([0, 1, 2], dtype=np.int32) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index a461d8b660da..b2e52762c7a8 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import cast, Any +from typing import Any, cast + import numpy as np c16 = np.complex128() @@ -18,10 +19,10 @@ b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 SEQ = (0, 1, 2, 3, 4) diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py index e64e4261b8e7..70de3a67917d 100644 --- a/numpy/typing/tests/data/pass/flatiter.py +++ b/numpy/typing/tests/data/pass/flatiter.py @@ -9,11 +9,18 @@ iter(a) next(a) a[0] -a[[0, 1, 2]] a[...] 
a[:] a.__array__() -a.__array__(np.dtype(np.float64)) b = np.array([1]).flat a[b] + +a[0] = "1" +a[:] = "2" +a[...] = "3" +a[[]] = "4" +a[[0]] = "5" +a[[[0]]] = "6" +a[[[[[0]]]]] = "7" +a[b] = "8" diff --git a/numpy/typing/tests/data/pass/index_tricks.py b/numpy/typing/tests/data/pass/index_tricks.py index dfc4ff2f314a..ea98156a8f2e 100644 --- a/numpy/typing/tests/data/pass/index_tricks.py +++ b/numpy/typing/tests/data/pass/index_tricks.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np AR_LIKE_b = [[True, True], [True, True]] diff --git a/numpy/typing/tests/data/pass/lib_user_array.py b/numpy/typing/tests/data/pass/lib_user_array.py index 62b7e85d7ff1..f79dc38af508 100644 --- a/numpy/typing/tests/data/pass/lib_user_array.py +++ b/numpy/typing/tests/data/pass/lib_user_array.py @@ -3,7 +3,7 @@ from __future__ import annotations import numpy as np -from numpy.lib.user_array import container +from numpy.lib.user_array import container # type: ignore[deprecated] N = 10_000 W = H = int(N**0.5) diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index c8fa476210e3..f1e0cb2a69d3 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -1,9 +1,10 @@ from __future__ import annotations -from typing import Any, TYPE_CHECKING from functools import partial +from typing import TYPE_CHECKING, Any import pytest + import numpy as np if TYPE_CHECKING: diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index dc9474fe4069..72cbc5d9b98e 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -1,11 +1,13 @@ -from typing import Any, TypeAlias, TypeVar, cast +import datetime as dt +from typing import Any, cast import numpy as np import numpy.typing as npt from numpy._typing import _Shape -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_Shape, np.dtype[ScalarT]] + +# mypy: disable-error-code=no-untyped-call MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) @@ -13,10 +15,16 @@ MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) -MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) -MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], np.ma.MaskedArray(["a"], "T")) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) +MAR_V: MaskedArray[np.void] = np.ma.MaskedArray( + [(1, 1)], + mask=[(False, False)], + dtype=[('a', int), ('b', int)] +) AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) @@ -31,6 +39,23 @@ MAR_f.mask = AR_b MAR_f.mask = np.False_ +MAR_i.fill_value = 0 + +MAR_b.flat[MAR_i > 0] = False +MAR_i.flat[:] = 1 +MAR_f.flat[[0]] = AR_LIKE_f +MAR_c.flat[[0, 0]] = [3, 4 + 3j] +MAR_td64.flat[0] = dt.timedelta(1) +MAR_dt64.flat[0] = dt.datetime(2020, 1, 1) + +MAR_b[MAR_i > 0] = False +MAR_i[:] = 1 +MAR_f[[0]] = AR_LIKE_f +MAR_c[[0, 0]] = 
[3, 4 + 3j] +MAR_td64[0] = dt.timedelta(1) +MAR_dt64[0] = dt.datetime(2020, 1, 1) +MAR_V['a'] = [2] + # Inplace addition MAR_b += AR_LIKE_b @@ -58,10 +83,10 @@ MAR_td64 += AR_LIKE_u MAR_td64 += AR_LIKE_i MAR_td64 += AR_LIKE_m -MAR_M_dt64 += AR_LIKE_b -MAR_M_dt64 += AR_LIKE_u -MAR_M_dt64 += AR_LIKE_i -MAR_M_dt64 += AR_LIKE_m +MAR_dt64 += AR_LIKE_b +MAR_dt64 += AR_LIKE_u +MAR_dt64 += AR_LIKE_i +MAR_dt64 += AR_LIKE_m MAR_S += b'snakes' MAR_U += 'snakes' @@ -90,10 +115,10 @@ MAR_td64 -= AR_LIKE_u MAR_td64 -= AR_LIKE_i MAR_td64 -= AR_LIKE_m -MAR_M_dt64 -= AR_LIKE_b -MAR_M_dt64 -= AR_LIKE_u -MAR_M_dt64 -= AR_LIKE_i -MAR_M_dt64 -= AR_LIKE_m +MAR_dt64 -= AR_LIKE_b +MAR_dt64 -= AR_LIKE_u +MAR_dt64 -= AR_LIKE_i +MAR_dt64 -= AR_LIKE_m # Inplace floor division diff --git a/numpy/typing/tests/data/pass/mod.py b/numpy/typing/tests/data/pass/mod.py index 2b7e6cd85c73..464326486fa2 100644 --- a/numpy/typing/tests/data/pass/mod.py +++ b/numpy/typing/tests/data/pass/mod.py @@ -13,7 +13,7 @@ b = bool(1) f = float(1) -i = int(1) +i = 1 AR = np.array([1], dtype=np.bool) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/multiarray.py b/numpy/typing/tests/data/pass/multiarray.py index 26cedfd77566..3a505590b5d3 100644 --- a/numpy/typing/tests/data/pass/multiarray.py +++ b/numpy/typing/tests/data/pass/multiarray.py @@ -70,7 +70,8 @@ np.unpackbits(AR_u1) np.shares_memory(1, 2) -np.shares_memory(AR_f8, AR_f8, max_work=1) +np.shares_memory(AR_f8, AR_f8, max_work=-1) np.may_share_memory(1, 2) -np.may_share_memory(AR_f8, AR_f8, max_work=1) +np.may_share_memory(AR_f8, AR_f8, max_work=0) +np.may_share_memory(AR_f8, AR_f8, max_work=-1) diff --git a/numpy/typing/tests/data/pass/ndarray_conversion.py b/numpy/typing/tests/data/pass/ndarray_conversion.py index 76da1dadd327..a4e0bcf34bdd 100644 --- a/numpy/typing/tests/data/pass/ndarray_conversion.py +++ b/numpy/typing/tests/data/pass/ndarray_conversion.py @@ -74,14 +74,8 @@ # setflags nd.setflags() - -nd.setflags(True) nd.setflags(write=True) - -nd.setflags(True, True) nd.setflags(write=True, align=True) - -nd.setflags(True, True, False) nd.setflags(write=True, align=True, uic=False) # fill is pretty simple diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index bb290cdf12f7..d50becb20ee4 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -9,13 +9,15 @@ from __future__ import annotations import operator -from typing import cast, Any +from typing import Any, cast import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): ... -class IntSubClass(npt.NDArray[np.intp]): ... + +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... +class IntSubClass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... + i4 = np.int32(1) A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) @@ -51,7 +53,12 @@ class IntSubClass(npt.NDArray[np.intp]): ... A.argmin(out=B_int0) i4.argsort() +i4.argsort(stable=True) A.argsort() +A.argsort(stable=True) + +A.sort() +A.sort(stable=True) i4.choose([()]) _choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32) @@ -119,7 +126,7 @@ class IntSubClass(npt.NDArray[np.intp]): ... i4.std() A.std() A.std(axis=0) -A.std(keepdims=True) +A.std(keepdims=True, mean=0.) A.std(out=B0.astype(np.float64)) i4.sum() @@ -137,7 +144,7 @@ class IntSubClass(npt.NDArray[np.intp]): ... i4.var() A.var() A.var(axis=0) -A.var(keepdims=True) +A.var(keepdims=True, mean=0.) 
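The `mean=` keyword exercised in the two `std`/`var` calls above lets a precomputed
mean be reused instead of recalculated; a small runtime sketch (values chosen here
purely for illustration):

    import numpy as np

    A = np.arange(6.0).reshape(2, 3)
    mu = A.mean(axis=0, keepdims=True)  # keepdims=True keeps `mu` broadcastable
    assert np.allclose(A.std(axis=0, mean=mu), A.std(axis=0))
    assert np.allclose(A.var(axis=0, mean=mu), A.var(axis=0))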
A.var(out=B0) A.argpartition([0]) @@ -185,14 +192,3 @@ class IntSubClass(npt.NDArray[np.intp]): ... A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) A_void["yop"] = A_float[:, 0] A_void["yap"] = A_float[:, 1] - -# deprecated - -with np.testing.assert_warns(DeprecationWarning): - ctypes_obj.get_data() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): - ctypes_obj.get_shape() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): - ctypes_obj.get_strides() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): - ctypes_obj.get_as_parameter() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] diff --git a/numpy/typing/tests/data/pass/numeric.py b/numpy/typing/tests/data/pass/numeric.py index 1eb14cf3a2a2..825a6dd74f34 100644 --- a/numpy/typing/tests/data/pass/numeric.py +++ b/numpy/typing/tests/data/pass/numeric.py @@ -2,25 +2,20 @@ Tests for :mod:`numpy._core.numeric`. Does not include tests which fall under ``array_constructors``. - """ -from __future__ import annotations -from typing import cast +from typing import Any import numpy as np -import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): ... + +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... i8 = np.int64(1) -A = cast( - np.ndarray[tuple[int, int, int], np.dtype[np.intp]], - np.arange(27).reshape(3, 3, 3), -) -B: list[list[list[int]]] = A.tolist() +A = np.arange(27).reshape(3, 3, 3) +B = A.tolist() C = np.empty((27, 27)).view(SubClass) np.count_nonzero(i8) diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index bce204a7378e..fd07e378e553 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -1,6 +1,7 @@ from __future__ import annotations from typing import Any + import numpy as np SEED_NONE = None diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py index 52a3d78a7622..586f0502b366 100644 --- a/numpy/typing/tests/data/pass/recfunctions.py +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -12,7 +12,7 @@ def test_recursive_fill_fields() -> None: [(1, 10.0), (2, 20.0)], dtype=[("A", np.int64), ("B", np.float64)], ) - b = np.zeros((int(3),), dtype=a.dtype) + b = np.zeros((3,), dtype=a.dtype) out = rfn.recursive_fill_fields(a, b) assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) @@ -51,8 +51,8 @@ def test_get_fieldstructure() -> None: def test_merge_arrays() -> None: assert_type( rfn.merge_arrays(( - np.ones((int(2),), np.int_), - np.ones((int(3),), np.float64), + np.ones((2,), np.int_), + np.ones((3,), np.float64), )), np.recarray[tuple[int], np.dtype[np.void]], ) @@ -60,7 +60,7 @@ def test_merge_arrays() -> None: def test_drop_fields() -> None: ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.drop_fields(a, "a"), @@ -78,7 +78,7 @@ def test_drop_fields() -> None: def test_rename_fields() -> None: ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), @@ -92,7 +92,7 @@ def test_repack_fields() -> None: assert_type(rfn.repack_fields(dt), np.dtype[np.void]) 
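The `assert_type` checks used throughout these tests return their first argument
unchanged at runtime, while the type checker verifies that the inferred type matches
the second argument exactly; a minimal sketch:

    from typing import assert_type

    import numpy as np

    f8 = np.float64(1.0)
    assert_type(f8 + 1.0, np.float64)  # fails type checking if inference drifts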
assert_type(rfn.repack_fields(dt.type(0)), np.void)
 assert_type(
-    rfn.repack_fields(np.ones((int(3),), dtype=dt)),
+    rfn.repack_fields(np.ones((3,), dtype=dt)),
     np.ndarray[tuple[int], np.dtype[np.void]],
 )

@@ -133,14 +133,14 @@ def test_require_fields() -> None:


 def test_stack_arrays() -> None:
-    x = np.zeros((int(2),), np.int32)
+    x = np.zeros((2,), np.int32)
     assert_type(
         rfn.stack_arrays(x),
         np.ndarray[tuple[int], np.dtype[np.int32]],
     )

-    z = np.ones((int(2),), [("A", "|S3"), ("B", float)])
-    zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)])
+    z = np.ones((2,), [("A", "|S3"), ("B", float)])
+    zz = np.ones((2,), [("A", "|S3"), ("B", np.float64), ("C", np.float64)])
     assert_type(
         rfn.stack_arrays((z, zz)),
         np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]],
@@ -150,12 +150,15 @@ def test_stack_arrays() -> None:

 def test_find_duplicates() -> None:
     ndtype = np.dtype([("a", int)])
-    a = np.ma.ones(7, mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
-    assert_type(rfn.find_duplicates(a), np.ma.MaskedArray[Any, np.dtype[np.void]])
+    a = np.ma.ones(7).view(ndtype)
+    assert_type(
+        rfn.find_duplicates(a),
+        np.ma.MaskedArray[tuple[int], np.dtype[np.void]],
+    )
     assert_type(
         rfn.find_duplicates(a, ignoremask=True, return_index=True),
         tuple[
-            np.ma.MaskedArray[Any, np.dtype[np.void]],
-            np.ndarray[Any, np.dtype[np.int_]],
+            np.ma.MaskedArray[tuple[int], np.dtype[np.void]],
+            np.ndarray[tuple[int], np.dtype[np.int_]],
         ],
     )
diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py
index 655903a50bce..133c5627e6e5 100644
--- a/numpy/typing/tests/data/pass/scalars.py
+++ b/numpy/typing/tests/data/pass/scalars.py
@@ -1,6 +1,7 @@
 import datetime as dt

 import pytest
+
 import numpy as np

 b = np.bool()
@@ -246,3 +247,16 @@ def __float__(self) -> float:
 c16.reshape(1)
 U.reshape(1)
 S.reshape(1)
+
+# Indexing scalars with any of {None, ..., tuple[()], tuple[None], tuple[...],
+# tuple[None, ...]} should be valid
+b[None]
+i8[None]
+u8[None]
+f8[None]
+c16[None]
+c16[...]
+c16[()] +c16[(None,)] +c16[(...,)] +c16[None, None] diff --git a/numpy/typing/tests/data/pass/shape.py b/numpy/typing/tests/data/pass/shape.py index 286c8a81dacf..e3b497bc0310 100644 --- a/numpy/typing/tests/data/pass/shape.py +++ b/numpy/typing/tests/data/pass/shape.py @@ -1,4 +1,4 @@ -from typing import Any, NamedTuple, cast +from typing import Any, NamedTuple import numpy as np diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 8f44e6e76f83..003e9ee58bb1 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -1,9 +1,9 @@ """Simple expression that should pass with mypy.""" import operator +from collections.abc import Iterable import numpy as np import numpy.typing as npt -from collections.abc import Iterable # Basic checks array = np.array([1, 2]) @@ -165,4 +165,6 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: ~array # Other methods -np.array([1, 2]).transpose() +array.transpose() + +array @ array diff --git a/numpy/typing/tests/data/pass/simple_py3.py b/numpy/typing/tests/data/pass/simple_py3.py deleted file mode 100644 index c05a1ce612ac..000000000000 --- a/numpy/typing/tests/data/pass/simple_py3.py +++ /dev/null @@ -1,6 +0,0 @@ -import numpy as np - -array = np.array([1, 2]) - -# The @ operator is not in python 2 -array @ array diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index f993939ddba1..7e556d10bef7 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np @@ -10,6 +12,9 @@ def __ceil__(self) -> Object: def __floor__(self) -> Object: return self + def __trunc__(self) -> Object: + return self + def __ge__(self, value: object) -> bool: return True @@ -27,12 +32,12 @@ def __array__(self, dtype: np.typing.DTypeLike | None = None, AR_LIKE_O = [Object(), Object(), Object()] AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5") -np.fix(AR_LIKE_b) -np.fix(AR_LIKE_u) -np.fix(AR_LIKE_i) -np.fix(AR_LIKE_f) -np.fix(AR_LIKE_O) -np.fix(AR_LIKE_f, out=AR_U) +np.fix(AR_LIKE_b) # type: ignore[deprecated] +np.fix(AR_LIKE_u) # type: ignore[deprecated] +np.fix(AR_LIKE_i) # type: ignore[deprecated] +np.fix(AR_LIKE_f) # type: ignore[deprecated] +np.fix(AR_LIKE_O) # type: ignore[deprecated] +np.fix(AR_LIKE_f, out=AR_U) # type: ignore[deprecated] np.isposinf(AR_LIKE_b) np.isposinf(AR_LIKE_u) diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 5dd78a197b8f..68fa5b5230a6 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -3,7 +3,7 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit, _128Bit +from numpy._typing import _64Bit, _128Bit b: bool c: complex @@ -64,7 +64,6 @@ AR_LIKE_m: list[np.timedelta64] AR_LIKE_M: list[np.datetime64] AR_LIKE_O: list[np.object_] - # Array subtraction assert_type(AR_number - AR_number, npt.NDArray[np.number]) @@ -407,14 +406,18 @@ assert_type(M8 - M8, np.timedelta64) assert_type(M8 - i, np.datetime64) assert_type(M8 - i8, np.datetime64) -assert_type(M8_none + m8, np.datetime64[None]) assert_type(M8_none + i, np.datetime64[None]) -assert_type(M8_none + i8, np.datetime64[None]) -assert_type(M8_none - M8, np.timedelta64[None]) -assert_type(M8_none - m8, 
np.datetime64[None]) assert_type(M8_none - i, np.datetime64[None]) + +assert_type(M8_none + i8, np.datetime64[None]) assert_type(M8_none - i8, np.datetime64[None]) +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. +assert_type(M8_none + m8, np.datetime64[None]) # type: ignore[assert-type] +assert_type(M8_none - M8, np.timedelta64[None]) # type: ignore[assert-type] +# NOTE: Mypy incorrectly infers `datetime64[Any]`, but pyright behaves correctly. +assert_type(M8_none - m8, np.datetime64[None]) # type: ignore[assert-type] + assert_type(m8 + m8, np.timedelta64) assert_type(m8 + i, np.timedelta64) assert_type(m8 + i8, np.timedelta64) @@ -429,7 +432,8 @@ assert_type(m8 / f4, np.timedelta64) assert_type(m8 / m8, np.float64) assert_type(m8 // m8, np.int64) assert_type(m8 % m8, np.timedelta64) -assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. +assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] assert_type(m8_none + m8, np.timedelta64[None]) assert_type(m8_none + i, np.timedelta64[None]) @@ -439,10 +443,12 @@ assert_type(m8_none - i8, np.timedelta64[None]) assert_type(m8_int + i, np.timedelta64[int]) assert_type(m8_int + m8_delta, np.timedelta64[int]) -assert_type(m8_int + m8, np.timedelta64[int | None]) +assert_type(m8_int + m8, np.timedelta64) assert_type(m8_int - i, np.timedelta64[int]) assert_type(m8_int - m8_delta, np.timedelta64[int]) -assert_type(m8_int - m8, np.timedelta64[int | None]) +assert_type(m8_int - m8_int, np.timedelta64[int]) +assert_type(m8_int - m8_none, np.timedelta64[None]) +assert_type(m8_int - m8, np.timedelta64) assert_type(m8_delta + date, dt.date) assert_type(m8_delta + time, dt.datetime) @@ -487,7 +493,7 @@ assert_type(c8 / b_, np.complex64) # Complex -assert_type(c16 + f16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(c16 + f16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(c16 + f8, np.complex128) assert_type(c16 + i8, np.complex128) @@ -500,12 +506,12 @@ assert_type(c16 + c, np.complex128) assert_type(c16 + f, np.complex128) assert_type(c16 + AR_f, npt.NDArray[np.complex128]) -assert_type(f16 + c16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(f16 + c16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(f8 + c16, np.complex128) assert_type(i8 + c16, np.complex128) assert_type(c8 + c16, np.complex128 | np.complex64) -assert_type(f4 + c16, np.complex128 | np.complex64) +assert_type(f4 + c16, np.complexfloating) assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) assert_type(b + c16, np.complex128) @@ -513,10 +519,10 @@ assert_type(c + c16, np.complex128) assert_type(f + c16, np.complex128) assert_type(AR_f + c16, npt.NDArray[np.complex128]) -assert_type(c8 + f16, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_128Bit, _128Bit]) +assert_type(c8 + f16, np.complex64 | np.complexfloating[_128Bit, _128Bit]) assert_type(c8 + c16, np.complex64 | np.complex128) assert_type(c8 + f8, np.complex64 | np.complex128) -assert_type(c8 + i8, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_64Bit, _64Bit]) +assert_type(c8 + i8, np.complex64 | np.complexfloating[_64Bit, _64Bit]) assert_type(c8 + c8, np.complex64) assert_type(c8 + f4, np.complex64) assert_type(c8 + i4, np.complex64) @@ -541,7 +547,7 @@ assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) # Float -assert_type(f8 + f16, np.float64 | 
np.floating[_128Bit]) +assert_type(f8 + f16, np.floating) assert_type(f8 + f8, np.float64) assert_type(f8 + i8, np.float64) assert_type(f8 + f4, np.float64) @@ -552,44 +558,44 @@ assert_type(f8 + c, np.float64 | np.complex128) assert_type(f8 + f, np.float64) assert_type(f8 + AR_f, npt.NDArray[np.float64]) -assert_type(f16 + f8, np.floating[_128Bit] | np.float64) +assert_type(f16 + f8, np.floating) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) -assert_type(f4 + f8, np.float32 | np.float64) -assert_type(i4 + f8,np.float64) +assert_type(f4 + f8, np.floating) +assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) assert_type(c + f8, np.complex128 | np.float64) assert_type(f + f8, np.float64) assert_type(AR_f + f8, npt.NDArray[np.float64]) -assert_type(f4 + f16, np.float32 | np.floating[_128Bit]) -assert_type(f4 + f8, np.float32 | np.float64) -assert_type(f4 + i8, np.float32 | np.floating[_64Bit]) +assert_type(f4 + f16, np.floating) +assert_type(f4 + f8, np.floating) +assert_type(f4 + i8, np.floating) assert_type(f4 + f4, np.float32) -assert_type(f4 + i4, np.float32) +assert_type(f4 + i4, np.floating) assert_type(f4 + b_, np.float32) assert_type(f4 + b, np.float32) -assert_type(f4 + c, np.complex64 | np.complex128) -assert_type(f4 + f, np.float32 | np.float64) +assert_type(f4 + c, np.complexfloating) +assert_type(f4 + f, np.float32) assert_type(f4 + AR_f, npt.NDArray[np.float64]) -assert_type(f16 + f4, np.floating[_128Bit] | np.float32) +assert_type(f16 + f4, np.floating) assert_type(f8 + f4, np.float64) -assert_type(i8 + f4, np.floating[_32Bit] | np.floating[_64Bit]) +assert_type(i8 + f4, np.floating) assert_type(f4 + f4, np.float32) -assert_type(i4 + f4, np.float32) +assert_type(i4 + f4, np.floating) assert_type(b_ + f4, np.float32) assert_type(b + f4, np.float32) -assert_type(c + f4, np.complex64 | np.complex128) -assert_type(f + f4, np.float64 | np.float32) +assert_type(c + f4, np.complexfloating) +assert_type(f + f4, np.float32) assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int assert_type(i8 + i8, np.int64) assert_type(i8 + u8, Any) -assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 + i4, np.signedinteger) assert_type(i8 + u4, Any) assert_type(i8 + b_, np.int64) assert_type(i8 + b, np.int64) @@ -599,7 +605,7 @@ assert_type(i8 + AR_f, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(u8 + i4, Any) -assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u8 + b_, np.uint64) assert_type(u8 + b, np.uint64) assert_type(u8 + c, np.complex128) @@ -608,7 +614,7 @@ assert_type(u8 + AR_f, npt.NDArray[np.float64]) assert_type(i8 + i8, np.int64) assert_type(u8 + i8, Any) -assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i4 + i8, np.signedinteger) assert_type(u4 + i8, Any) assert_type(b_ + i8, np.int64) assert_type(b + i8, np.int64) @@ -618,14 +624,14 @@ assert_type(AR_f + i8, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(i4 + u8, Any) -assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u4 + u8, np.unsignedinteger) assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) assert_type(c + u8, np.complex128) assert_type(f + u8, np.float64) assert_type(AR_f + u8, npt.NDArray[np.float64]) -assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i4 + i8, np.signedinteger) 
assert_type(i4 + i4, np.int32) assert_type(i4 + b_, np.int32) assert_type(i4 + b, np.int32) @@ -633,13 +639,13 @@ assert_type(i4 + AR_f, npt.NDArray[np.float64]) assert_type(u4 + i8, Any) assert_type(u4 + i4, Any) -assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u4 + u8, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(u4 + b_, np.uint32) assert_type(u4 + b, np.uint32) assert_type(u4 + AR_f, npt.NDArray[np.float64]) -assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 + i4, np.signedinteger) assert_type(i4 + i4, np.int32) assert_type(b_ + i4, np.int32) assert_type(b + i4, np.int32) @@ -647,7 +653,7 @@ assert_type(AR_f + i4, npt.NDArray[np.float64]) assert_type(i8 + u4, Any) assert_type(i4 + u4, Any) -assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(b_ + u4, np.uint32) assert_type(b + u4, np.uint32) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 7b27d57bfe23..36440fca9487 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,14 +1,15 @@ -import sys from collections import deque from pathlib import Path -from typing import Any, TypeVar, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt +from numpy._typing import _AnyShape -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +class SubClass[ScalarT: np.generic](np.ndarray[_AnyShape, np.dtype[ScalarT]]): ... -class SubClass(npt.NDArray[_ScalarT_co]): ... +class IntoSubClass[ScalarT: np.generic]: + def __array__(self) -> SubClass[ScalarT]: ... 
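A runtime sketch of the `__array__` protocol that `IntoSubClass` above models
(`MyArray` and `IntoMyArray` are illustrative stand-ins): `np.array` consults
`__array__`, and `subok=True` preserves the ndarray subclass it returns.

    import numpy as np

    class MyArray(np.ndarray): ...

    class IntoMyArray:
        def __array__(self, dtype=None, copy=None):
            return np.zeros(3).view(MyArray)

    obj = IntoMyArray()
    assert type(np.array(obj)) is np.ndarray           # subclass stripped by default
    assert type(np.array(obj, subok=True)) is MyArray  # subclass preserved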
i8: np.int64 @@ -16,6 +17,7 @@ A: npt.NDArray[np.float64] B: SubClass[np.float64] C: list[int] D: SubClass[np.float64 | np.int64] +E: IntoSubClass[np.float64 | np.int64] mixed_shape: tuple[int, np.int64] @@ -25,65 +27,68 @@ assert_type(np.empty_like(A), npt.NDArray[np.float64]) assert_type(np.empty_like(B), SubClass[np.float64]) assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any]) assert_type(np.empty_like(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty_like(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A), npt.NDArray[np.float64]) assert_type(np.array(B), npt.NDArray[np.float64]) assert_type(np.array([1, 1.0]), npt.NDArray[Any]) assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.array(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.array(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A, like=A), npt.NDArray[np.float64]) assert_type(np.array(A, subok=True), npt.NDArray[np.float64]) assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +assert_type(np.array(E, subok=True), SubClass[np.float64 | np.int64]) +# https://github.com/numpy/numpy/issues/29245 +assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.zeros([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.zeros([1, 5, 6], dtype="c16"), npt.NDArray[Any]) assert_type(np.zeros(mixed_shape), npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty([1, 5, 6], dtype="c16"), npt.NDArray[Any]) assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) assert_type(np.concatenate(A), npt.NDArray[np.float64]) -assert_type(np.concatenate([A, A]), Any) # pyright correctly infers this as NDArray[float64] +assert_type(np.concatenate([A, A]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) assert_type(np.concatenate(([1], [1])), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any]) assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.concatenate(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.concatenate(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) assert_type(np.asarray(A), npt.NDArray[np.float64]) assert_type(np.asarray(B), npt.NDArray[np.float64]) assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asanyarray(A), npt.NDArray[np.float64]) assert_type(np.asanyarray(B), SubClass[np.float64]) assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64]) 
-assert_type(np.asanyarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asanyarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.ascontiguousarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asfortranarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asfortranarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) @@ -107,17 +112,23 @@ assert_type(np.frombuffer(A), npt.NDArray[np.float64]) assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) -assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) -assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) -assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) -assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) -assert_type(np.arange(start=0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) -assert_type(np.arange(np.timedelta64(0)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(0, np.timedelta64(10)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), np.ndarray[tuple[int], np.dtype[np.datetime64]]) +_x_bool: bool +_x_int: int +_x_float: float +_x_timedelta: np.timedelta64[int] +_x_datetime: np.datetime64[int] + +assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64]]) assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) -assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype]) +assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[np.int_]]) assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype]) assert_type(np.require(A), npt.NDArray[np.float64]) @@ -182,7 +193,10 @@ assert_type(np.ones(_shape_2d), np.ndarray[tuple[int, int], np.dtype[np.float64] assert_type(np.ones(_shape_nd), np.ndarray[tuple[int, ...], np.dtype[np.float64]]) assert_type(np.ones(_shape_1d, dtype=np.int64), np.ndarray[tuple[int], np.dtype[np.int64]]) 
assert_type(np.ones(_shape_like), npt.NDArray[np.float64]) -assert_type(np.ones(_shape_like, dtype=np.dtypes.Int64DType()), np.ndarray[Any, np.dtypes.Int64DType]) +assert_type( + np.ones(_shape_like, dtype=np.dtypes.Int64DType()), + np.ndarray[tuple[Any, ...], np.dtypes.Int64DType], +) assert_type(np.ones(_shape_like, dtype=int), npt.NDArray[Any]) assert_type(np.ones(mixed_shape), npt.NDArray[np.float64]) @@ -200,9 +214,23 @@ assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...] assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64]) -assert_type(np.identity(10), npt.NDArray[np.float64]) -assert_type(np.identity(10, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) +assert_type(np.identity(3), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.identity(3, dtype=np.int8), np.ndarray[tuple[int, int], np.dtype[np.int8]]) +assert_type(np.identity(3, dtype=bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="bool"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="b1"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="?"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype=int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="int"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="n"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype=float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="float"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="f8"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="d"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype=complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="complex"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="c16"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="D"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) assert_type(np.atleast_1d(C), npt.NDArray[Any]) @@ -220,28 +248,27 @@ assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) -assert_type(np.vstack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.vstack([A, A]), npt.NDArray[np.float64]) assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.vstack([A, C]), npt.NDArray[Any]) assert_type(np.vstack([C, C]), npt.NDArray[Any]) -assert_type(np.hstack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.stack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.stack([A, A]), npt.NDArray[np.float64]) assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, 
C]), npt.NDArray[Any]) assert_type(np.stack([C, C]), npt.NDArray[Any]) -assert_type(np.stack([A, A], axis=0), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) assert_type(np.stack([A, A], out=B), SubClass[np.float64]) assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.block(C), npt.NDArray[Any]) -if sys.version_info >= (3, 12): - from collections.abc import Buffer +from collections.abc import Buffer - def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... +def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... - buffer: Buffer - assert_type(create_array(buffer), npt.NDArray[Any]) +buffer: Buffer +assert_type(create_array(buffer), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/arraypad.pyi b/numpy/typing/tests/data/reveal/arraypad.pyi index c5a443d93fe3..3d53d913a770 100644 --- a/numpy/typing/tests/data/reveal/arraypad.pyi +++ b/numpy/typing/tests/data/reveal/arraypad.pyi @@ -20,3 +20,8 @@ assert_type(np.pad(AR_LIKE, (2, 3), "constant"), npt.NDArray[Any]) assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64]) assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64]) + +assert_type(np.pad(AR_i8, {-1: (2, 3)}), npt.NDArray[np.int64]) +assert_type(np.pad(AR_i8, {-2: 4}), npt.NDArray[np.int64]) +pad_width: dict[int, int | tuple[int, int]] = {-1: (2, 3), -2: 4} +assert_type(np.pad(AR_i8, pad_width), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi index 3b339edced32..17e175edc2b7 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -1,6 +1,6 @@ import contextlib from collections.abc import Callable -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt @@ -12,7 +12,7 @@ func_int: Callable[[np.integer], str] assert_type(np.get_printoptions(), _FormatOptions) assert_type( - np.array2string(AR, formatter={'float_kind': func_float, 'int_kind': func_int}), + np.array2string(AR, formatter={"float_kind": func_float, "int_kind": func_int}), str, ) assert_type(np.format_float_scientific(1.0), str) diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 7e5ca5c5717b..36fc0603dcfd 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -23,7 +23,8 @@ assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_]) assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type( np.intersect1d(AR_f8, AR_f8, return_indices=True), @@ -31,7 +32,8 @@ assert_type( ) assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool]) @@ -40,11 +42,13 @@ assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool]) assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool]) assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/arrayterator.pyi b/numpy/typing/tests/data/reveal/arrayterator.pyi index 470160c24de3..85eeff4add08 100644 --- a/numpy/typing/tests/data/reveal/arrayterator.pyi +++ b/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -13,7 +13,7 @@ assert_type(ar_iter.start, list[int]) assert_type(ar_iter.stop, list[int]) assert_type(ar_iter.step, list[int]) assert_type(ar_iter.shape, tuple[Any, ...]) -assert_type(ar_iter.flat, Generator[np.int64, None, None]) +assert_type(ar_iter.flat, Generator[np.int64]) assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 6c6b56197546..809f77d9736d 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,12 +1,10 @@ -from typing import Any, TypeAlias, assert_type -from typing import Literal as L +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit -FalseType: TypeAlias = L[False] -TrueType: TypeAlias = L[True] +type FalseType = L[False] +type TrueType = L[True] i4: np.int32 i8: np.int64 @@ -44,11 +42,11 @@ assert_type(i4 | i4, np.int32) assert_type(i4 ^ i4, np.int32) assert_type(i4 & i4, np.int32) -assert_type(i8 << i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 >> i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 | i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 ^ i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 & i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 << i4, np.signedinteger) +assert_type(i8 >> i4, np.signedinteger) +assert_type(i8 | i4, np.signedinteger) +assert_type(i8 ^ i4, np.signedinteger) +assert_type(i8 & i4, np.signedinteger) assert_type(i8 << b_, np.int64) assert_type(i8 >> b_, np.int64) @@ -86,11 +84,11 @@ assert_type(u4 | i4, np.signedinteger) assert_type(u4 ^ i4, np.signedinteger) assert_type(u4 & i4, np.signedinteger) -assert_type(u4 << i, np.signedinteger) -assert_type(u4 >> i, np.signedinteger) -assert_type(u4 | i, np.signedinteger) -assert_type(u4 ^ i, np.signedinteger) -assert_type(u4 & i, np.signedinteger) +assert_type(u4 << i, np.uint32) 
+assert_type(u4 >> i, np.uint32) +assert_type(u4 | i, np.uint32) +assert_type(u4 ^ i, np.uint32) +assert_type(u4 & i, np.uint32) assert_type(u8 << b_, np.uint64) assert_type(u8 >> b_, np.uint64) diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 9fdc9f61e893..b83ecc62221f 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy._typing as np_t import numpy.typing as npt -AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] @@ -65,6 +65,7 @@ assert_type(np.char.join(AR_T, "_"), AR_TU_alias) assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar="a"), npt.NDArray[np.bytes_]) assert_type(np.char.ljust(AR_T, 5), AR_T_alias) assert_type(np.char.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) @@ -209,6 +210,9 @@ assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, n assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) @@ -216,3 +220,6 @@ assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[n assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.asarray(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi index b5f4392b75c8..b93826922662 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -1,10 +1,10 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_BytesCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] -_StrCharArray: 
TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +type _BytesCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +type _StrCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] AR_U: _StrCharArray AR_S: _BytesCharArray @@ -50,6 +50,7 @@ assert_type(AR_S.join([b"_", b""]), _BytesCharArray) assert_type(AR_U.ljust(5), _StrCharArray) assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar="a"), _BytesCharArray) assert_type(AR_U.rjust(5), _StrCharArray) assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 2165d17fce34..6df5a3d94314 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -1,6 +1,6 @@ import decimal import fractions -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt @@ -20,10 +20,10 @@ td = np.timedelta64(0, "D") b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 AR = np.array([0], dtype=np.int64) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi index 0564d725cf62..e3558925e4d0 100644 --- a/numpy/typing/tests/data/reveal/ctypeslib.pyi +++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -6,19 +6,17 @@ import numpy.typing as npt from numpy import ctypeslib AR_bool: npt.NDArray[np.bool] -AR_ubyte: npt.NDArray[np.ubyte] -AR_ushort: npt.NDArray[np.ushort] -AR_uintc: npt.NDArray[np.uintc] -AR_ulong: npt.NDArray[np.ulong] -AR_ulonglong: npt.NDArray[np.ulonglong] -AR_byte: npt.NDArray[np.byte] -AR_short: npt.NDArray[np.short] -AR_intc: npt.NDArray[np.intc] -AR_long: npt.NDArray[np.long] -AR_longlong: npt.NDArray[np.longlong] -AR_single: npt.NDArray[np.single] -AR_double: npt.NDArray[np.double] -AR_longdouble: npt.NDArray[np.longdouble] +AR_i8: npt.NDArray[np.int8] +AR_u8: npt.NDArray[np.uint8] +AR_i16: npt.NDArray[np.int16] +AR_u16: npt.NDArray[np.uint16] +AR_i32: npt.NDArray[np.int32] +AR_u32: npt.NDArray[np.uint32] +AR_i64: npt.NDArray[np.int64] +AR_u64: npt.NDArray[np.uint64] +AR_f32: npt.NDArray[np.float32] +AR_f64: npt.NDArray[np.float64] +AR_f80: npt.NDArray[np.longdouble] AR_void: npt.NDArray[np.void] pointer: ct._Pointer[Any] @@ -33,49 +31,56 @@ assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._con assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype]]) assert_type(np.ctypeslib.as_ctypes_type(np.bool), type[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes_type(np.ushort), type[ct.c_ushort]) -assert_type(np.ctypeslib.as_ctypes_type(np.uintc), type[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes_type(np.byte), type[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes_type(np.short), type[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes_type(np.intc), type[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes_type(np.single), type[ct.c_float]) -assert_type(np.ctypeslib.as_ctypes_type(np.double), type[ct.c_double]) -assert_type(np.ctypeslib.as_ctypes_type(ct.c_double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.int8), type[ct.c_int8]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint8), type[ct.c_uint8]) 
+assert_type(np.ctypeslib.as_ctypes_type(np.int16), type[ct.c_int16]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint16), type[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes_type(np.int32), type[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint32), type[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes_type(np.int64), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint64), type[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes_type(np.float32), type[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes_type(np.float64), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.longdouble), type[ct.c_longdouble]) +assert_type(np.ctypeslib.as_ctypes_type("?"), type[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes_type("intp"), type[ct.c_ssize_t]) assert_type(np.ctypeslib.as_ctypes_type("q"), type[ct.c_longlong]) +assert_type(np.ctypeslib.as_ctypes_type("i8"), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type("f8"), type[ct.c_double]) assert_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]), type[Any]) -assert_type(np.ctypeslib.as_ctypes_type("i8"), type[Any]) -assert_type(np.ctypeslib.as_ctypes_type("f8"), type[Any]) assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) -assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) -assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) -assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) +assert_type(np.ctypeslib.as_ctypes(AR_u8.take(0)), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(AR_u16.take(0)), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(AR_u32.take(0)), ct.c_uint32) -assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), ct.c_byte) -assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) -assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) -assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) -assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) -assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) +assert_type(np.ctypeslib.as_ctypes(np.bool()), ct.c_bool) +assert_type(np.ctypeslib.as_ctypes(np.int8()), ct.c_int8) +assert_type(np.ctypeslib.as_ctypes(np.uint8()), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(np.int16()), ct.c_int16) +assert_type(np.ctypeslib.as_ctypes(np.uint16()), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(np.int32()), ct.c_int32) +assert_type(np.ctypeslib.as_ctypes(np.uint32()), ct.c_uint32) +assert_type(np.ctypeslib.as_ctypes(np.int64()), ct.c_int64) +assert_type(np.ctypeslib.as_ctypes(np.uint64()), ct.c_uint64) +assert_type(np.ctypeslib.as_ctypes(np.float32()), ct.c_float) +assert_type(np.ctypeslib.as_ctypes(np.float64()), ct.c_double) +assert_type(np.ctypeslib.as_ctypes(np.longdouble()), ct.c_longdouble) +assert_type(np.ctypeslib.as_ctypes(np.void(b"")), Any) assert_type(np.ctypeslib.as_ctypes(AR_bool), ct.Array[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes(AR_ubyte), ct.Array[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes(AR_ushort), ct.Array[ct.c_ushort]) -assert_type(np.ctypeslib.as_ctypes(AR_uintc), ct.Array[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes(AR_byte), ct.Array[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes(AR_short), ct.Array[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes(AR_intc), ct.Array[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes(AR_single), ct.Array[ct.c_float]) -assert_type(np.ctypeslib.as_ctypes(AR_double), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_i8), ct.Array[ct.c_int8]) 
+assert_type(np.ctypeslib.as_ctypes(AR_u8), ct.Array[ct.c_uint8]) +assert_type(np.ctypeslib.as_ctypes(AR_i16), ct.Array[ct.c_int16]) +assert_type(np.ctypeslib.as_ctypes(AR_u16), ct.Array[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes(AR_i32), ct.Array[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes(AR_u32), ct.Array[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes(AR_i64), ct.Array[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes(AR_u64), ct.Array[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes(AR_f32), ct.Array[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes(AR_f64), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_f80), ct.Array[ct.c_longdouble]) assert_type(np.ctypeslib.as_ctypes(AR_void), ct.Array[Any]) -assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) +assert_type(np.ctypeslib.as_array(AR_u8), npt.NDArray[np.ubyte]) assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) - -assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) -assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) -assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) -assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) -assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) -assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 721d2708737f..48e9d54b7951 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -2,24 +2,20 @@ import ctypes as ct import datetime as dt from decimal import Decimal from fractions import Fraction -from typing import Any, Literal, LiteralString, TypeAlias, assert_type +from typing import Any, Literal, LiteralString, assert_type import numpy as np from numpy.dtypes import StringDType # a combination of likely `object` dtype-like candidates (no `_co`) -_PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta +type _PyObjectLike = Decimal | Fraction | dt.datetime | dt.timedelta dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] dtype_i8: np.dtype[np.int64] -py_int_co: type[int] -py_float_co: type[float] -py_complex_co: type[complex] py_object: type[_PyObjectLike] py_character: type[str | bytes] -py_flexible: type[str | bytes | memoryview] ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble] ct_number: type[ct.c_uint8 | ct.c_float] @@ -48,19 +44,16 @@ assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types assert_type(np.dtype(bool), np.dtype[np.bool]) -assert_type(np.dtype(py_int_co), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(py_float_co), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(py_complex_co), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) +assert_type(np.dtype(int), np.dtype[np.int_ | Any]) +assert_type(np.dtype(float), np.dtype[np.float64 | Any]) +assert_type(np.dtype(complex), np.dtype[np.complex128 | Any]) assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) -assert_type(np.dtype(py_character), np.dtype[np.character]) assert_type(np.dtype(memoryview), np.dtype[np.void]) 
-assert_type(np.dtype(py_flexible), np.dtype[np.flexible]) +assert_type(np.dtype(py_character), np.dtype[np.character]) +# object types assert_type(np.dtype(list), np.dtype[np.object_]) assert_type(np.dtype(dt.datetime), np.dtype[np.object_]) assert_type(np.dtype(dt.timedelta), np.dtype[np.object_]) @@ -71,16 +64,13 @@ assert_type(np.dtype(Fraction), np.dtype[np.object_]) assert_type(np.dtype("?"), np.dtype[np.bool]) assert_type(np.dtype("|b1"), np.dtype[np.bool]) assert_type(np.dtype("u1"), np.dtype[np.uint8]) -assert_type(np.dtype("l"), np.dtype[np.long]) +assert_type(np.dtype("l"), np.dtype[np.int32 | np.int64]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) assert_type(np.dtype(cs_integer), np.dtype[np.integer]) -assert_type(np.dtype(cs_number), np.dtype[np.number]) -assert_type(np.dtype(cs_flex), np.dtype[np.flexible]) -assert_type(np.dtype(cs_generic), np.dtype[np.generic]) # ctypes -assert_type(np.dtype(ct.c_double), np.dtype[np.double]) +assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong]) assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32]) assert_type(np.dtype(ct.c_bool), np.dtype[np.bool]) @@ -90,7 +80,7 @@ assert_type(np.dtype(ct.py_object), np.dtype[np.object_]) # Special case for None assert_type(np.dtype(None), np.dtype[np.float64]) -# Dypes of dtypes +# dtypes of dtypes assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64]) assert_type(np.dtype(dt_inexact), np.dtype[np.inexact]) @@ -99,6 +89,7 @@ assert_type(np.dtype("S8"), np.dtype) # Void assert_type(np.dtype(("U", 10)), np.dtype[np.void]) +assert_type(np.dtype({"formats": (int, "u8"), "names": ("n", "B")}), np.dtype[np.void]) # StringDType assert_type(np.dtype(dt_string), StringDType) @@ -134,3 +125,8 @@ assert_type(dtype_V["f0"], np.dtype) assert_type(dtype_V[0], np.dtype) assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void]) assert_type(dtype_V[["f0"]], np.dtype[np.void]) + +class _D: + __numpy_dtype__: np.dtype[np.int8] + +assert_type(np.dtype(_D()), np.dtype[np.int8]) diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index e188d30fe79f..4907f8464cf2 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,47 +1,86 @@ -from typing import Literal, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np -import numpy.typing as npt - -a: np.flatiter[npt.NDArray[np.str_]] -a_1d: np.flatiter[np.ndarray[tuple[int], np.dtype[np.bytes_]]] - -Size: TypeAlias = Literal[42] -a_1d_fixed: np.flatiter[np.ndarray[tuple[Size], np.dtype[np.object_]]] - -assert_type(a.base, npt.NDArray[np.str_]) -assert_type(a.copy(), npt.NDArray[np.str_]) -assert_type(a.coords, tuple[int, ...]) -assert_type(a.index, int) -assert_type(iter(a), np.flatiter[npt.NDArray[np.str_]]) -assert_type(next(a), np.str_) -assert_type(a[0], np.str_) -assert_type(a[[0, 1, 2]], npt.NDArray[np.str_]) -assert_type(a[...], npt.NDArray[np.str_]) -assert_type(a[:], npt.NDArray[np.str_]) -assert_type(a[(...,)], npt.NDArray[np.str_]) -assert_type(a[(0,)], np.str_) - -assert_type(a.__array__(), npt.NDArray[np.str_]) -assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64]) -assert_type( - a_1d.__array__(), - np.ndarray[tuple[int], np.dtype[np.bytes_]], -) -assert_type( - a_1d.__array__(np.dtype(np.float64)), - np.ndarray[tuple[int], np.dtype[np.float64]], -) 
-assert_type( - a_1d_fixed.__array__(), - np.ndarray[tuple[Size], np.dtype[np.object_]], -) -assert_type( - a_1d_fixed.__array__(np.dtype(np.float64)), - np.ndarray[tuple[Size], np.dtype[np.float64]], -) - -a[0] = "a" -a[:5] = "a" -a[...] = "a" -a[(...,)] = "a" + +type _ArrayND = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] +type _Array1D = np.ndarray[tuple[int], np.dtypes.BytesDType] +type _Array2D = np.ndarray[tuple[int, int], np.dtypes.Int8DType] + +_a_nd: np.flatiter[_ArrayND] +_a_1d: np.flatiter[_Array1D] +_a_2d: np.flatiter[_Array2D] + +### + +# .base +assert_type(_a_nd.base, _ArrayND) +assert_type(_a_1d.base, _Array1D) +assert_type(_a_2d.base, _Array2D) + +# .coords +assert_type(_a_nd.coords, tuple[Any, ...]) +assert_type(_a_1d.coords, tuple[int]) +assert_type(_a_2d.coords, tuple[int, int]) + +# .index +assert_type(_a_nd.index, int) +assert_type(_a_1d.index, int) +assert_type(_a_2d.index, int) + +# .__len__() +assert_type(len(_a_nd), int) +assert_type(len(_a_1d), int) +assert_type(len(_a_2d), int) + +# .__iter__() +assert_type(iter(_a_nd), np.flatiter[_ArrayND]) +assert_type(iter(_a_1d), np.flatiter[_Array1D]) +assert_type(iter(_a_2d), np.flatiter[_Array2D]) + +# .__next__() +assert_type(next(_a_nd), np.str_) +assert_type(next(_a_1d), np.bytes_) +assert_type(next(_a_2d), np.int8) + +# .__getitem__(()) +assert_type(_a_nd[()], _ArrayND) +assert_type(_a_1d[()], _Array1D) +assert_type(_a_2d[()], _Array2D) +# .__getitem__(int) +assert_type(_a_nd[0], np.str_) +assert_type(_a_1d[0], np.bytes_) +assert_type(_a_2d[0], np.int8) +# .__getitem__(slice) +assert_type(_a_nd[::], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[::], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[::], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(EllipsisType) +assert_type(_a_nd[...], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[...], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[...], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[!]) +assert_type(_a_nd[[]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[int]) +assert_type(_a_nd[[0]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[0]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[0]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[list[int]]) +assert_type(_a_nd[[[0]]], np.ndarray[tuple[int, int], np.dtypes.StrDType]) +assert_type(_a_1d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.BytesDType]) +assert_type(_a_2d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.Int8DType]) +# .__getitem__(list[list[list[list[int]]]]) +assert_type(_a_nd[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.StrDType]) +assert_type(_a_1d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.BytesDType]) +assert_type(_a_2d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.Int8DType]) + +# __array__() +assert_type(_a_nd.__array__(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.__array__(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.__array__(), np.ndarray[tuple[int], np.dtypes.Int8DType]) + +# .copy() +assert_type(_a_nd.copy(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.copy(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.copy(), np.ndarray[tuple[int], np.dtypes.Int8DType]) diff --git 
a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 5438e001a13f..26de2c2b6e37 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,12 +1,11 @@ """Tests for :mod:`_core.fromnumeric`.""" from typing import Any, assert_type -from typing import Literal as L import numpy as np import numpy.typing as npt -class NDArraySubclass(npt.NDArray[np.complex128]): ... +class NDArraySubclass(np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]): ... AR_b: npt.NDArray[np.bool] AR_f4: npt.NDArray[np.float32] @@ -26,7 +25,7 @@ i8: np.int64 f: float # integer‑dtype subclass for argmin/argmax -class NDArrayIntSubclass(npt.NDArray[np.intp]): ... +class NDArrayIntSubclass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... AR_sub_i: NDArrayIntSubclass assert_type(np.take(b, 0), np.bool) @@ -124,17 +123,18 @@ assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.trace(AR_b), Any) assert_type(np.trace(AR_f4), Any) assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.trace(AR_f4, out=AR_subclass, dtype=None), NDArraySubclass) assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | np.bool]]) +assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_b), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_1d), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_nd), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(np.shape(b), tuple[()]) assert_type(np.shape(f), tuple[()]) diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi index 825daba43064..cc964d753055 100644 --- a/numpy/typing/tests/data/reveal/getlimits.pyi +++ b/numpy/typing/tests/data/reveal/getlimits.pyi @@ -1,11 +1,11 @@ -from typing import Any, LiteralString, assert_type +from typing import assert_type import numpy as np -from numpy._typing import _64Bit f: float f8: np.float64 c8: np.complex64 +c16: np.complex128 i: int i8: np.int64 @@ -15,9 +15,10 @@ finfo_f8: np.finfo[np.float64] iinfo_i8: np.iinfo[np.int64] assert_type(np.finfo(f), np.finfo[np.float64]) -assert_type(np.finfo(f8), np.finfo[np.floating[_64Bit]]) +assert_type(np.finfo(f8), np.finfo[np.float64]) assert_type(np.finfo(c8), np.finfo[np.float32]) -assert_type(np.finfo('f2'), np.finfo[np.floating]) +assert_type(np.finfo(c16), np.finfo[np.float64]) +assert_type(np.finfo("f2"), np.finfo[np.float16]) assert_type(finfo_f8.dtype, np.dtype[np.float64]) assert_type(finfo_f8.bits, int) @@ -41,11 +42,12 @@ assert_type(finfo_f8.smallest_subnormal, np.float64) assert_type(np.iinfo(i), np.iinfo[np.int_]) assert_type(np.iinfo(i8), np.iinfo[np.int64]) assert_type(np.iinfo(u4), np.iinfo[np.uint32]) 
-assert_type(np.iinfo('i2'), np.iinfo[Any]) +assert_type(np.iinfo("i2"), np.iinfo[np.int16]) +assert_type(np.iinfo("u2"), np.iinfo[np.uint16]) assert_type(iinfo_i8.dtype, np.dtype[np.int64]) -assert_type(iinfo_i8.kind, LiteralString) +assert_type(iinfo_i8.kind, str) assert_type(iinfo_i8.bits, int) -assert_type(iinfo_i8.key, LiteralString) +assert_type(iinfo_i8.key, str) assert_type(iinfo_i8.min, int) assert_type(iinfo_i8.max, int) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 3ce8d375201b..090af934a411 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -1,20 +1,26 @@ from collections.abc import Callable from fractions import Fraction -from typing import Any, assert_type +from typing import Any, LiteralString, assert_type, type_check_only import numpy as np import numpy.typing as npt -vectorized_func: np.vectorize - f8: np.float64 +AR_LIKE_b: list[bool] +AR_LIKE_i8: list[int] AR_LIKE_f8: list[float] AR_LIKE_c16: list[complex] AR_LIKE_O: list[Fraction] +AR_u1: npt.NDArray[np.uint8] AR_i8: npt.NDArray[np.int64] +AR_f2: npt.NDArray[np.float16] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_f10: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] +AR_c20: npt.NDArray[np.clongdouble] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] @@ -22,21 +28,26 @@ AR_b: npt.NDArray[np.bool] AR_U: npt.NDArray[np.str_] CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] + AR_b_list: list[npt.NDArray[np.bool]] -def func( - a: npt.NDArray[Any], - posarg: bool = ..., - /, - arg: int = ..., - *, - kwarg: str = ..., -) -> npt.NDArray[Any]: ... +@type_check_only +def func(a: np.ndarray, posarg: bool = ..., /, arg: int = ..., *, kwarg: str = ...) -> np.ndarray: ... +@type_check_only +def func_f8(a: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: ... 
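+
+# NOTE: `typing.type_check_only` marks the two helpers above as stubs that
+# exist only for the type checker; they are never defined at runtime, and are
+# here purely to give the `assert_type` checks below (e.g. for `np.piecewise`)
+# concretely-typed callables to pass in.
+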
+### + +# vectorize +vectorized_func: np.vectorize assert_type(vectorized_func.pyfunc, Callable[..., Any]) assert_type(vectorized_func.cache, bool) -assert_type(vectorized_func.signature, str | None) -assert_type(vectorized_func.otypes, str | None) +assert_type(vectorized_func.signature, LiteralString | None) +assert_type(vectorized_func.otypes, LiteralString | None) assert_type(vectorized_func.excluded, set[int | str]) assert_type(vectorized_func.__doc__, str | None) assert_type(vectorized_func([1]), Any) @@ -46,137 +57,271 @@ assert_type( np.vectorize, ) +# rot90 +assert_type(np.rot90(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.rot90(AR_f8, k=2), npt.NDArray[np.float64]) -assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), npt.NDArray[Any]) +assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), np.ndarray) -assert_type(np.flip(f8), np.float64) -assert_type(np.flip(1.0), Any) +# flip +assert_type(np.flip(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.flip(AR_f8, axis=(0, 1)), npt.NDArray[np.float64]) -assert_type(np.flip(AR_LIKE_f8, axis=0), npt.NDArray[Any]) +assert_type(np.flip(AR_LIKE_f8, axis=0), np.ndarray) +# iterable assert_type(np.iterable(1), bool) assert_type(np.iterable([1]), bool) -assert_type(np.average(AR_f8), np.floating) -assert_type(np.average(AR_f8, weights=AR_c16), np.complexfloating) +# average +assert_type(np.average(AR_f8_2d), np.float64) +assert_type(np.average(AR_f8_2d, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.average(AR_f8), np.float64) +assert_type(np.average(AR_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, weights=AR_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_LIKE_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.average(AR_O), Any) -assert_type(np.average(AR_f8, returned=True), tuple[np.floating, np.floating]) -assert_type(np.average(AR_f8, weights=AR_c16, returned=True), tuple[np.complexfloating, np.complexfloating]) +assert_type(np.average(AR_O, axis=1), np.ndarray) +assert_type(np.average(AR_O, keepdims=True), np.ndarray) assert_type(np.average(AR_O, returned=True), tuple[Any, Any]) -assert_type(np.average(AR_f8, axis=0), Any) -assert_type(np.average(AR_f8, axis=0, returned=True), tuple[Any, Any]) +assert_type(np.average(AR_O, axis=1, returned=True), tuple[np.ndarray, np.ndarray]) +assert_type(np.average(AR_O, keepdims=True, returned=True), tuple[np.ndarray, np.ndarray]) +# asarray_chkfinite +assert_type(np.asarray_chkfinite(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.asarray_chkfinite(AR_f8), npt.NDArray[np.float64]) 
-assert_type(np.asarray_chkfinite(AR_LIKE_f8), npt.NDArray[Any])
+assert_type(np.asarray_chkfinite(AR_LIKE_f8), np.ndarray)
 assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64])
-assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any])
+assert_type(np.asarray_chkfinite(AR_f8, dtype=float), np.ndarray)

+# piecewise
+assert_type(np.piecewise(AR_f8_1d, AR_b, [func]), np.ndarray[tuple[int], np.dtype[np.float64]])
 assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64])
+assert_type(np.piecewise(AR_f8, AR_b, [func_f8]), npt.NDArray[np.float64])
 assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64])
-assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=''), npt.NDArray[np.float64])
-assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=''), npt.NDArray[np.float64])
-assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), npt.NDArray[Any])
-
-assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any])
+assert_type(np.piecewise(AR_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64])
+assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=""), npt.NDArray[np.float64])
+assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=""), npt.NDArray[np.float64])
+assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), np.ndarray)
+assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64])
+
+# extract
+assert_type(np.extract(AR_i8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.extract(AR_i8, AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.bool]])
+assert_type(np.extract(AR_i8, AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.int_]])
+assert_type(np.extract(AR_i8, AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.extract(AR_i8, AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]])
+
+# select
+assert_type(np.select([AR_b], [AR_f8_1d]), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.select([AR_b], [AR_f8]), npt.NDArray[np.float64])
+
+# place
+assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None)

-assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any])
+# copy
+assert_type(np.copy(AR_LIKE_f8), np.ndarray)
 assert_type(np.copy(AR_U), npt.NDArray[np.str_])
-assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any])  # pyright correctly infers `NDArray[str_]`
 assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]])
 assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]])
+# pyright correctly infers `NDArray[str_]` here
+assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any])  # pyright: ignore[reportAssertTypeFailure]

-assert_type(np.gradient(AR_f8, axis=None), Any)
-assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any)
-
-assert_type(np.diff("bob", n=0), str)
-assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any])
-assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any])
-
+# gradient
+assert_type(np.gradient(AR_f8_1d, 1), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(
+    np.gradient(AR_f8_2d, [1, 2], [2, 3.5, 4]),
+    tuple[
+        np.ndarray[tuple[int, int], np.dtype[np.float64]],
+        np.ndarray[tuple[int, int], np.dtype[np.float64]],
+    ],
+)
+assert_type(
+    np.gradient(AR_f8_3d),
+    tuple[
+        np.ndarray[tuple[int, int, int], np.dtype[np.float64]],
+        np.ndarray[tuple[int, int, int], np.dtype[np.float64]],
+        np.ndarray[tuple[int, int, int], np.dtype[np.float64]],
+    ],
+)
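+
+# NOTE: for input of statically unknown rank, the stubs cannot know how many
+# gradients come back: `np.gradient` returns a single array for 1-D input and
+# one array per axis otherwise, hence the `| Any` union asserted below.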
+assert_type(np.gradient(AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]] | Any) +assert_type(np.gradient(AR_LIKE_f8, edge_order=2), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.gradient(AR_LIKE_c16, axis=0), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# diff +assert_type(np.diff("git", n=0), str) +assert_type(np.diff(AR_f8), npt.NDArray[np.float64]) +assert_type(np.diff(AR_f8_1d, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_f8_2d, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.diff(AR_LIKE_f8, prepend=1.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.diff(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.diff(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# interp assert_type(np.interp(1, [1], AR_f8), np.float64) assert_type(np.interp(1, [1], [1]), np.float64) assert_type(np.interp(1, [1], AR_c16), np.complex128) -assert_type(np.interp(1, [1], [1j]), np.complex128) # pyright correctly infers `complex128 | float64` -assert_type(np.interp([1], [1], AR_f8), npt.NDArray[np.float64]) -assert_type(np.interp([1], [1], [1]), npt.NDArray[np.float64]) -assert_type(np.interp([1], [1], AR_c16), npt.NDArray[np.complex128]) -assert_type(np.interp([1], [1], [1j]), npt.NDArray[np.complex128]) # pyright correctly infers `NDArray[complex128 | float64]` - -assert_type(np.angle(f8), np.floating) -assert_type(np.angle(AR_f8), npt.NDArray[np.floating]) -assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating]) -assert_type(np.angle(AR_O), npt.NDArray[np.object_]) - -assert_type(np.unwrap(AR_f8), npt.NDArray[np.floating]) +assert_type(np.interp(1, [1], [1j]), np.complex128) +assert_type(np.interp([1], [1], AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], [1]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.interp([1], [1], [1j]), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# angle +assert_type(np.angle(1), np.float64) +assert_type(np.angle(1, deg=True), np.float64) +assert_type(np.angle(1j), np.float64) +assert_type(np.angle(f8), np.float64) +assert_type(np.angle(AR_b), npt.NDArray[np.float64]) +assert_type(np.angle(AR_u1), npt.NDArray[np.float64]) +assert_type(np.angle(AR_i8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f2), npt.NDArray[np.float16]) +assert_type(np.angle(AR_f4), npt.NDArray[np.float32]) +assert_type(np.angle(AR_c8), npt.NDArray[np.float32]) +assert_type(np.angle(AR_f8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_c16), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f10), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_c20), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# unwrap +assert_type(np.unwrap(AR_f2), npt.NDArray[np.float16]) +assert_type(np.unwrap(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unwrap(AR_f10), npt.NDArray[np.longdouble]) 
assert_type(np.unwrap(AR_O), npt.NDArray[np.object_]) - -assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complexfloating]) - +assert_type(np.unwrap(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_3d), np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# sort_complex +assert_type(np.sort_complex(AR_u1), npt.NDArray[np.complex64]) +assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.sort_complex(AR_f10), npt.NDArray[np.clongdouble]) +assert_type(np.sort_complex(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.sort_complex(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# trim_zeros assert_type(np.trim_zeros(AR_f8), npt.NDArray[np.float64]) assert_type(np.trim_zeros(AR_LIKE_f8), list[float]) -assert_type(np.extract(AR_i8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.extract(AR_i8, AR_LIKE_f8), npt.NDArray[Any]) - -assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) - -assert_type(np.cov(AR_f8, bias=True), npt.NDArray[np.floating]) -assert_type(np.cov(AR_f8, AR_c16, ddof=1), npt.NDArray[np.complexfloating]) -assert_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.cov(AR_f8, fweights=AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.corrcoef(AR_f8, rowvar=True), npt.NDArray[np.floating]) -assert_type(np.corrcoef(AR_f8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.corrcoef(AR_f8, dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.corrcoef(AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.blackman(5), npt.NDArray[np.floating]) -assert_type(np.bartlett(6), npt.NDArray[np.floating]) -assert_type(np.hanning(4.5), npt.NDArray[np.floating]) -assert_type(np.hamming(0), npt.NDArray[np.floating]) -assert_type(np.i0(AR_i8), npt.NDArray[np.floating]) -assert_type(np.kaiser(4, 5.9), npt.NDArray[np.floating]) - -assert_type(np.sinc(1.0), np.floating) -assert_type(np.sinc(1j), np.complexfloating) -assert_type(np.sinc(AR_f8), npt.NDArray[np.floating]) -assert_type(np.sinc(AR_c16), npt.NDArray[np.complexfloating]) - -assert_type(np.median(AR_f8, keepdims=False), np.floating) -assert_type(np.median(AR_c16, overwrite_input=True), np.complexfloating) -assert_type(np.median(AR_m), np.timedelta64) +# cov +assert_type(np.cov(AR_f8_1d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.cov(AR_LIKE_f8), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, dtype=np.float16), np.ndarray[tuple[()], np.dtype[np.float16]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.cov(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.cov(AR_LIKE_f8, dtype=float), np.ndarray[tuple[()]]) 
+assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]])
+
+# corrcoef
+assert_type(np.corrcoef(AR_f8_1d), np.float64)
+assert_type(np.corrcoef(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64)
+assert_type(np.corrcoef(AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64)
+assert_type(np.corrcoef(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]])
+assert_type(np.corrcoef(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]])
+assert_type(np.corrcoef(AR_LIKE_f8), np.float64)
+assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]])
+assert_type(np.corrcoef(AR_LIKE_f8, dtype=np.float16), np.float16)
+assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]])
+assert_type(np.corrcoef(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]])
+assert_type(np.corrcoef(AR_LIKE_f8, dtype=float), Any)
+assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]])
+
+# window functions
+assert_type(np.blackman(5), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.bartlett(6), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.hanning(4.5), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.hamming(0), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.kaiser(4, 5.9), np.ndarray[tuple[int], np.dtype[np.float64]])
+
+# i0 (modified Bessel function of the first kind, order 0)
+assert_type(np.i0(AR_i8), npt.NDArray[np.float64])
+
+# sinc (cardinal sine function)
+assert_type(np.sinc(1.0), np.float64)
+assert_type(np.sinc(1j), np.complex128 | Any)
+assert_type(np.sinc(AR_f8), npt.NDArray[np.float64])
+assert_type(np.sinc(AR_c16), npt.NDArray[np.complex128])
+assert_type(np.sinc(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.sinc(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]])
+
+# median
+assert_type(np.median(AR_f8, keepdims=False), np.float64)
+assert_type(np.median(AR_c16, overwrite_input=True), np.complex128)
+# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly.
+assert_type(np.median(AR_m), np.timedelta64) # type: ignore[assert-type] assert_type(np.median(AR_O), Any) -assert_type(np.median(AR_f8, keepdims=True), Any) -assert_type(np.median(AR_c16, axis=0), Any) +assert_type(np.median(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.median(AR_c16, keepdims=True), npt.NDArray[np.complex128]) +assert_type(np.median(AR_c16, axis=0), npt.NDArray[np.complex128]) +assert_type(np.median(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_LIKE_c16, keepdims=True), npt.NDArray[np.complex128]) assert_type(np.median(AR_LIKE_f8, out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.percentile(AR_f8, 50), np.floating) -assert_type(np.percentile(AR_c16, 50), np.complexfloating) +# percentile +assert_type(np.percentile(AR_f8, 50), np.float64) +assert_type(np.percentile(AR_f8, 50, axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, 50), np.complex128) assert_type(np.percentile(AR_m, 50), np.timedelta64) assert_type(np.percentile(AR_M, 50, overwrite_input=True), np.datetime64) assert_type(np.percentile(AR_O, 50), Any) -assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.floating]) -assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complexfloating]) +assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complex128]) assert_type(np.percentile(AR_m, [50]), npt.NDArray[np.timedelta64]) assert_type(np.percentile(AR_M, [50], method="nearest"), npt.NDArray[np.datetime64]) assert_type(np.percentile(AR_O, [50]), npt.NDArray[np.object_]) -assert_type(np.percentile(AR_f8, [50], keepdims=True), Any) -assert_type(np.percentile(AR_f8, [50], axis=[1]), Any) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) assert_type(np.percentile(AR_f8, [50], out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.quantile(AR_f8, 0.5), np.floating) -assert_type(np.quantile(AR_c16, 0.5), np.complexfloating) -assert_type(np.quantile(AR_m, 0.5), np.timedelta64) -assert_type(np.quantile(AR_M, 0.5, overwrite_input=True), np.datetime64) -assert_type(np.quantile(AR_O, 0.5), Any) -assert_type(np.quantile(AR_f8, [0.5]), npt.NDArray[np.floating]) -assert_type(np.quantile(AR_c16, [0.5]), npt.NDArray[np.complexfloating]) -assert_type(np.quantile(AR_m, [0.5]), npt.NDArray[np.timedelta64]) -assert_type(np.quantile(AR_M, [0.5], method="nearest"), npt.NDArray[np.datetime64]) -assert_type(np.quantile(AR_O, [0.5]), npt.NDArray[np.object_]) -assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) -assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) -assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) - +# quantile +assert_type(np.quantile(AR_f8, 0.50), np.float64) +assert_type(np.quantile(AR_f8, 0.50, axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=0, keepdims=True), 
npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, 0.50), np.complex128) +assert_type(np.quantile(AR_m, 0.50), np.timedelta64) +assert_type(np.quantile(AR_M, 0.50, overwrite_input=True), np.datetime64) +assert_type(np.quantile(AR_O, 0.50), Any) +assert_type(np.quantile(AR_f8, [0.50]), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, [0.50]), npt.NDArray[np.complex128]) +assert_type(np.quantile(AR_m, [0.50]), npt.NDArray[np.timedelta64]) +assert_type(np.quantile(AR_M, [0.50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.quantile(AR_O, [0.50]), npt.NDArray[np.object_]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], out=AR_c16), npt.NDArray[np.complex128]) + +# trapezoid assert_type(np.trapezoid(AR_LIKE_f8), np.float64) assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) @@ -188,26 +333,79 @@ assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) -assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) -assert_type(np.trapezoid(AR_O), float | npt.NDArray[np.object_]) -assert_type(np.trapezoid(AR_O, AR_LIKE_f8), float | npt.NDArray[np.object_]) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(np.trapezoid(AR_O), npt.NDArray[np.object_] | Any) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), npt.NDArray[np.object_] | Any) +# meshgrid assert_type(np.meshgrid(), tuple[()]) -assert_type(np.meshgrid(AR_c16, indexing="ij"), tuple[npt.NDArray[np.complex128]]) -assert_type(np.meshgrid(AR_i8, AR_f8, copy=False), tuple[npt.NDArray[np.int64], npt.NDArray[np.float64]]) -assert_type(np.meshgrid(AR_LIKE_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[np.float64]]) -assert_type(np.meshgrid(AR_LIKE_f8, AR_i8, AR_c16), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.meshgrid(*AR_LIKE_f8), tuple[npt.NDArray[Any], ...]) +assert_type( + np.meshgrid(AR_f8), + tuple[ + np.ndarray[tuple[int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_c16, indexing="ij"), + tuple[ + np.ndarray[tuple[int], np.dtype[np.complex128]], + ], +) +assert_type( + np.meshgrid(AR_i8, AR_f8, copy=False), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_i8, AR_c16), + tuple[ + np.ndarray[tuple[int, int, 
int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int, int], np.dtype[np.complex128]], + ], +) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_LIKE_f8), tuple[np.ndarray, ...]) +assert_type(np.meshgrid(*AR_LIKE_f8), tuple[np.ndarray, ...]) -assert_type(np.delete(AR_f8, np.s_[:5]), npt.NDArray[np.float64]) -assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), npt.NDArray[Any]) +# delete +assert_type(np.delete(AR_f8, np.s_[:5]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), np.ndarray) -assert_type(np.insert(AR_f8, np.s_[:5], 5), npt.NDArray[np.float64]) -assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), npt.NDArray[Any]) +# insert +assert_type(np.insert(AR_f8, np.s_[:5], 5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), np.ndarray) -assert_type(np.append(AR_f8, 5), npt.NDArray[Any]) -assert_type(np.append(AR_LIKE_f8, 1j, axis=0), npt.NDArray[Any]) +# append +assert_type(np.append(f8, f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_LIKE_f8, AR_LIKE_c16, axis=0), np.ndarray) +assert_type(np.append(AR_f8, AR_LIKE_f8, axis=0), np.ndarray) +# digitize assert_type(np.digitize(4.5, [1]), np.intp) assert_type(np.digitize(AR_f8, [1, 2, 3]), npt.NDArray[np.intp]) diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi index 8b0a9f3d22e7..b7cbadefc610 100644 --- a/numpy/typing/tests/data/reveal/lib_polynomial.pyi +++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -118,7 +118,10 @@ assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) -assert_type(np.polysub(AR_b, AR_b), NoReturn) + +def test_invalid_polysub() -> None: + assert_type(np.polysub(AR_b, AR_b), NoReturn) + assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating]) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 417fb0d8c558..7bafc58789ff 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt @@ -10,96 +10,262 @@ from numpy.linalg._linalg import ( SVDResult, ) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] + +bool_list_2d: list[list[bool]] +int_list_2d: list[list[int]] +float_list_1d: list[float] +float_list_2d: list[list[float]] +float_list_3d: list[list[list[float]]] +float_list_4d: list[list[list[list[float]]]] +complex_list_2d: list[list[complex]] +complex_list_3d: list[list[list[complex]]] +bytes_list_2d: list[list[bytes]] +str_list_2d: list[list[str]] + +AR_any: np.ndarray +AR_f_: npt.NDArray[np.floating] +AR_c_: npt.NDArray[np.complexfloating] AR_i8: npt.NDArray[np.int64] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_c8: 
npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] AR_m: npt.NDArray[np.timedelta64] AR_S: npt.NDArray[np.str_] AR_b: npt.NDArray[np.bool] +SC_f8: np.float64 +AR_f8_0d: np.ndarray[tuple[()], np.dtype[np.float64]] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] + +AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] +AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] +AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] + +### + assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensorsolve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.solve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.solve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.solve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.solve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_power(AR_i8, 1), npt.NDArray[np.int64]) +assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[np.complex128]) +assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[np.object_]) assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.cholesky(AR_c16), 
npt.NDArray[np.complexfloating]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] -assert_type(np.linalg.qr(AR_i8), QRResult) -assert_type(np.linalg.qr(AR_f8), QRResult) -assert_type(np.linalg.qr(AR_c16), QRResult) +assert_type(np.linalg.qr(AR_i8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_i8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_i8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_f4), QRResult[np.float32]) +assert_type(np.linalg.qr(AR_f4, "r"), npt.NDArray[np.float32]) +assert_type(np.linalg.qr(AR_f4, "raw"), tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.qr(AR_f8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_f8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_f8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_c8), QRResult[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "r"), npt.NDArray[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "raw"), tuple[npt.NDArray[np.complex64], npt.NDArray[np.complex64]]) +assert_type(np.linalg.qr(AR_c16), QRResult[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "r"), npt.NDArray[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "raw"), tuple[npt.NDArray[np.complex128], npt.NDArray[np.complex128]]) +# Mypy bug: `Expression is of type "QRResult[Any]", not "QRResult[Any]"` +assert_type(np.linalg.qr(AR_any), QRResult[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.qr(AR_any, "r"), npt.NDArray[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "tuple[Any, ...]", <--snip-->"` +assert_type(np.linalg.qr(AR_any, "raw"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) # type: ignore[assert-type] assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) -assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating] | npt.NDArray[np.complexfloating]) -assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.float64]) -assert_type(np.linalg.eig(AR_i8), EigResult) 
-assert_type(np.linalg.eig(AR_f8), EigResult) -assert_type(np.linalg.eig(AR_c16), EigResult) +assert_type(np.linalg.eig(AR_i8), EigResult[np.float64] | EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_f4), EigResult[np.float32] | EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_f8), EigResult[np.float64] | EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) +# Mypy bug: `Expression is of type "EigResult[Any]", not "EigResult[Any]"` +assert_type(np.linalg.eig(AR_f_), EigResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_c_), EigResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_any), EigResult[Any]) # type: ignore[assert-type] -assert_type(np.linalg.eigh(AR_i8), EighResult) -assert_type(np.linalg.eigh(AR_f8), EighResult) -assert_type(np.linalg.eigh(AR_c16), EighResult) +assert_type(np.linalg.eigh(AR_i8), EighResult[np.float64, np.float64]) +assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) +assert_type(np.linalg.eigh(AR_f8), EighResult[np.float64, np.float64]) +assert_type(np.linalg.eigh(AR_c8), EighResult[np.float32, np.complex64]) +assert_type(np.linalg.eigh(AR_c16), EighResult[np.float64, np.complex128]) +# Mypy bug: `Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]"` +assert_type(np.linalg.eigh(AR_any), EighResult[Any, Any]) # type: ignore[assert-type] -assert_type(np.linalg.svd(AR_i8), SVDResult) -assert_type(np.linalg.svd(AR_f8), SVDResult) -assert_type(np.linalg.svd(AR_c16), SVDResult) +assert_type(np.linalg.svd(AR_i8), SVDResult[np.float64, np.float64]) assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) -assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) -assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_f4), SVDResult[np.float32, np.float32]) +assert_type(np.linalg.svd(AR_f4, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_f8), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_c8), SVDResult[np.float32, np.complex64]) +assert_type(np.linalg.svd(AR_c8, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_c16), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(int_list_2d), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(int_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(float_list_2d), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(float_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(complex_list_2d), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(complex_list_2d, compute_uv=False), npt.NDArray[np.float64]) +# Mypy bug: `Expression is of type "SVDResult[Any, Any]", not "SVDResult[Any, Any]"` +assert_type(np.linalg.svd(AR_any), SVDResult[Any, Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.svd(AR_any, compute_uv=False), npt.NDArray[Any]) # type: ignore[assert-type] -assert_type(np.linalg.cond(AR_i8), Any) -assert_type(np.linalg.cond(AR_f8), Any) -assert_type(np.linalg.cond(AR_c16), Any) +assert_type(np.linalg.svdvals(AR_b), 
npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(int_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(complex_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.matrix_rank(AR_i8), Any) assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) +assert_type(np.linalg.matrix_rank(SC_f8), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(float_list_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_2d), np.int_) +assert_type(np.linalg.matrix_rank(float_list_2d), np.int_) +assert_type(np.linalg.matrix_rank(AR_f8_3d), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.linalg.matrix_rank(float_list_3d), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.linalg.matrix_rank(AR_f8_4d), npt.NDArray[np.int_]) +assert_type(np.linalg.matrix_rank(float_list_4d), npt.NDArray[np.int_]) -assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.cond(AR_i8), Any) +assert_type(np.linalg.cond(AR_f8), Any) +assert_type(np.linalg.cond(AR_c16), Any) +assert_type(np.linalg.cond(AR_f4_2d), np.float32) +assert_type(np.linalg.cond(AR_f8_2d), np.float64) +assert_type(np.linalg.cond(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.cond(AR_f8_3d), npt.NDArray[np.float64]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) assert_type(np.linalg.slogdet(AR_c16), SlogdetResult) +assert_type(np.linalg.slogdet(AR_f4_2d), SlogdetResult[np.float32, np.float32]) +assert_type(np.linalg.slogdet(AR_f8_2d), SlogdetResult[np.float64, np.float64]) +assert_type(np.linalg.slogdet(AR_f4_3d), SlogdetResult[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.slogdet(AR_f8_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.slogdet(complex_list_2d), SlogdetResult[np.float64, np.complex128]) +assert_type(np.linalg.slogdet(complex_list_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.complex128]]) assert_type(np.linalg.det(AR_i8), Any) assert_type(np.linalg.det(AR_f8), Any) assert_type(np.linalg.det(AR_c16), Any) +assert_type(np.linalg.det(AR_f4_2d), np.float32) +assert_type(np.linalg.det(AR_f8_2d), np.float64) +assert_type(np.linalg.det(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.det(AR_f8_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.det(complex_list_2d), np.complex128) +assert_type(np.linalg.det(complex_list_3d), npt.NDArray[np.complex128]) -assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) -assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) -assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) +assert_type( + 
np.linalg.lstsq(AR_i8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4), + tuple[npt.NDArray[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_i8, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f4), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c8), + tuple[npt.NDArray[np.complex64], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c16), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c16, AR_c8), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_1d), + tuple[_Array1D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4_1d), + tuple[_Array1D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_2d), + tuple[_Array2D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4_2d), + tuple[_Array2D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) assert_type(np.linalg.norm(AR_i8), np.floating) assert_type(np.linalg.norm(AR_f8), np.floating) @@ -117,16 +283,48 @@ assert_type(np.linalg.vector_norm(AR_f8), np.floating) assert_type(np.linalg.vector_norm(AR_c16), np.floating) assert_type(np.linalg.vector_norm(AR_S), np.floating) +assert_type(np.linalg.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.linalg.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) + assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) +# Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.linalg.diagonal(AR_any), np.ndarray) # type: ignore[assert-type] +assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.linalg.diagonal(AR_f8_2d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.linalg.diagonal(bool_list_2d), npt.NDArray[np.bool]) +assert_type(np.linalg.diagonal(int_list_2d), npt.NDArray[np.int_]) +assert_type(np.linalg.diagonal(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.diagonal(complex_list_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.diagonal(bytes_list_2d), npt.NDArray[np.bytes_]) +assert_type(np.linalg.diagonal(str_list_2d), npt.NDArray[np.str_]) + +assert_type(np.linalg.trace(AR_any), Any) +assert_type(np.linalg.trace(AR_f4), Any) +assert_type(np.linalg.trace(AR_f4_2d), np.float32) +assert_type(np.linalg.trace(AR_f8_2d), np.float64) +assert_type(np.linalg.trace(AR_f4_3d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.linalg.trace(AR_f8_3d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.linalg.trace(AR_f8_4d), np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[np.float64]]) +assert_type(np.linalg.trace(bool_list_2d), np.bool) +assert_type(np.linalg.trace(int_list_2d), np.int_) +assert_type(np.linalg.trace(float_list_2d), np.float64) +assert_type(np.linalg.trace(complex_list_2d), np.complex128) +assert_type(np.linalg.trace(float_list_3d), npt.NDArray[np.float64]) + assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index deda19c3d743..a94d278e87f4 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,41 +1,66 @@ -from typing import Any, Literal, TypeAlias, TypeVar, assert_type +from typing import Any, Literal, NoReturn, assert_type import numpy as np -from numpy import dtype, generic from numpy._typing import NDArray, _AnyShape -_ScalarT = TypeVar("_ScalarT", bound=generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _NoMaskType = np.bool[Literal[False]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -class MaskedArraySubclass(MaskedArray[np.complex128]): ... +### + +class MaskedArraySubclass[ScalarT: np.generic](np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]]): ... + +class IntoMaskedArraySubClass[ScalarT: np.generic]: + def __array__(self) -> MaskedArraySubclass[ScalarT]: ... 
+ +type MaskedArraySubclassC = MaskedArraySubclass[np.complex128] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] +AR_i8: NDArray[np.int64] +AR_u4: NDArray[np.uint32] AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] AR_o: NDArray[np.timedelta64] +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_td64: list[np.timedelta64] +AR_LIKE_dt64: list[np.datetime64] +AR_LIKE_o: list[np.object_] +AR_number: NDArray[np.number] + +MAR_c8: MaskedArray[np.complex64] MAR_c16: MaskedArray[np.complex128] MAR_b: MaskedArray[np.bool] MAR_f4: MaskedArray[np.float32] MAR_f8: MaskedArray[np.float64] MAR_i8: MaskedArray[np.int64] +MAR_u4: MaskedArray[np.uint32] MAR_dt64: MaskedArray[np.datetime64] MAR_td64: MaskedArray[np.timedelta64] MAR_o: MaskedArray[np.object_] MAR_s: MaskedArray[np.str_] MAR_byte: MaskedArray[np.bytes_] MAR_V: MaskedArray[np.void] +MAR_floating: MaskedArray[np.floating] +MAR_number: MaskedArray[np.number] -MAR_subclass: MaskedArraySubclass +MAR_subclass: MaskedArraySubclassC +MAR_into_subclass: IntoMaskedArraySubClass[np.float32] MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] +MAR_2d_V: np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]] b: np.bool f4: np.float32 f: float +i: int assert_type(MAR_1d.shape, tuple[int]) @@ -50,9 +75,9 @@ assert_type(np.ma.min(MAR_b, axis=0), Any) assert_type(np.ma.min(MAR_f4, axis=0), Any) assert_type(np.ma.min(MAR_b, keepdims=True), Any) assert_type(np.ma.min(MAR_f4, keepdims=True), Any) -assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.min(), np.bool) assert_type(MAR_f4.min(), np.float32) @@ -60,9 +85,9 @@ assert_type(MAR_b.min(axis=0), Any) assert_type(MAR_f4.min(axis=0), Any) assert_type(MAR_b.min(keepdims=True), Any) assert_type(MAR_f4.min(keepdims=True), Any) -assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.max(MAR_b), np.bool) assert_type(np.ma.max(MAR_f4), np.float32) @@ -70,9 +95,9 @@ assert_type(np.ma.max(MAR_b, axis=0), Any) assert_type(np.ma.max(MAR_f4, axis=0), Any) assert_type(np.ma.max(MAR_b, keepdims=True), Any) assert_type(np.ma.max(MAR_f4, keepdims=True), Any) -assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.max(), np.bool) assert_type(MAR_f4.max(), np.float32) @@ -80,9 +105,9 @@ assert_type(MAR_b.max(axis=0), Any) 
assert_type(MAR_f4.max(axis=0), Any) assert_type(MAR_b.max(keepdims=True), Any) assert_type(MAR_f4.max(keepdims=True), Any) -assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.ptp(MAR_b), np.bool) assert_type(np.ma.ptp(MAR_f4), np.float32) @@ -90,9 +115,9 @@ assert_type(np.ma.ptp(MAR_b, axis=0), Any) assert_type(np.ma.ptp(MAR_f4, axis=0), Any) assert_type(np.ma.ptp(MAR_b, keepdims=True), Any) assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any) -assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.ptp(), np.bool) assert_type(MAR_f4.ptp(), np.float32) @@ -100,9 +125,9 @@ assert_type(MAR_b.ptp(axis=0), Any) assert_type(MAR_f4.ptp(axis=0), Any) assert_type(MAR_b.ptp(keepdims=True), Any) assert_type(MAR_f4.ptp(keepdims=True), Any) -assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmin(), np.intp) assert_type(MAR_f4.argmin(), np.intp) @@ -110,8 +135,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmin(axis=0), Any) assert_type(MAR_f4.argmin(axis=0), Any) assert_type(MAR_b.argmin(keepdims=True), Any) -assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.argmin(MAR_b), np.intp) assert_type(np.ma.argmin(MAR_f4), np.intp) @@ -119,8 +144,8 @@ assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmin(MAR_b, axis=0), Any) assert_type(np.ma.argmin(MAR_f4, axis=0), Any) assert_type(np.ma.argmin(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmax(), np.intp) assert_type(MAR_f4.argmax(), np.intp) @@ -128,8 +153,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmax(axis=0), Any) assert_type(MAR_f4.argmax(axis=0), Any) assert_type(MAR_b.argmax(keepdims=True), Any) -assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclass) 
+assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.argmax(MAR_b), np.intp) assert_type(np.ma.argmax(MAR_f4), np.intp) @@ -137,8 +162,8 @@ assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmax(MAR_b, axis=0), Any) assert_type(np.ma.argmax(MAR_f4, axis=0), Any) assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.all(), np.bool) assert_type(MAR_f4.all(), np.bool) @@ -148,8 +173,8 @@ assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool]) assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool]) assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool]) assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) -assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.any(), np.bool) assert_type(MAR_f4.any(), np.bool) @@ -159,22 +184,22 @@ assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) -assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f4.sort(), None) -assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) +assert_type(MAR_f4.sort(axis=0, kind="quicksort", order="K", endwith=False, fill_value=42., stable=False), None) assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) -assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) assert_type(MAR_f8.take(0), np.float64) assert_type(MAR_1d.take(0), Any) assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) -assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.take(f, 0), Any) assert_type(np.ma.take(f4, 0), np.float32) @@ -183,16 +208,16 @@ assert_type(np.ma.take(AR_f4, 0), np.float32) assert_type(np.ma.take(MAR_1d, 0), Any) assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) -assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take(MAR_f8, [0], 
out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.take([1], [0]), MaskedArray[Any]) assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) assert_type(MAR_f4.partition(1), None) -assert_type(MAR_V.partition(1, axis=0, kind='introselect', order='K'), None) +assert_type(MAR_V.partition(1, axis=0, kind="introselect", order="K"), None) assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) -assert_type(MAR_1d.argpartition(1, axis=0, kind='introselect', order='K'), MaskedArray[np.intp]) +assert_type(MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), MaskedArray[np.intp]) assert_type(np.ma.ndim(f4), int) assert_type(np.ma.ndim(MAR_b), int) @@ -261,7 +286,7 @@ assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) assert_type(MAR_byte.count(), int) assert_type(MAR_f4.count(axis=None), int) assert_type(MAR_f4.count(axis=0), NDArray[np.int_]) -assert_type(MAR_b.count(axis=(0,1)), NDArray[np.int_]) +assert_type(MAR_b.count(axis=(0, 1)), NDArray[np.int_]) assert_type(MAR_o.count(keepdims=True), NDArray[np.int_]) assert_type(MAR_o.count(axis=None, keepdims=True), NDArray[np.int_]) assert_type(MAR_o.count(None, True), NDArray[np.int_]) @@ -269,33 +294,40 @@ assert_type(MAR_o.count(None, True), NDArray[np.int_]) assert_type(np.ma.count(MAR_byte), int) assert_type(np.ma.count(MAR_byte, axis=None), int) assert_type(np.ma.count(MAR_f4, axis=0), NDArray[np.int_]) -assert_type(np.ma.count(MAR_b, axis=(0,1)), NDArray[np.int_]) +assert_type(np.ma.count(MAR_b, axis=(0, 1)), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, keepdims=True), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, axis=None, keepdims=True), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False]), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False], axis=0), MaskedArray[np.float32]) +assert_type(MAR_f4.compress([True, False], axis=0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.compress([True, False], 0, MAR_subclass), MaskedArraySubclassC) + assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(np.ma.compressed([[1,2,3]]), np.ndarray[tuple[int], np.dtype]) +assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) -assert_type(MAR_f4.put([0,4,8], [10,20,30]), None) +assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) assert_type(MAR_f4.put(4, 999), None) -assert_type(MAR_f4.put(4, 999, mode='clip'), None) +assert_type(MAR_f4.put(4, 999, mode="clip"), None) + +assert_type(MAR_c8.__array_wrap__(AR_b), MaskedArray[np.bool]) -assert_type(np.ma.put(MAR_f4, [0,4,8], [10,20,30]), None) +assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) assert_type(np.ma.put(MAR_f4, 4, 999), None) -assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) +assert_type(np.ma.put(MAR_f4, 4, 999, mode="clip"), None) assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None) assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None) -assert_type(MAR_f4.filled(float('nan')), NDArray[np.float32]) +assert_type(MAR_f4.filled(float("nan")), NDArray[np.float32]) assert_type(MAR_i8.filled(), NDArray[np.int64]) assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype]) -assert_type(np.ma.filled(MAR_f4, float('nan')), NDArray[np.float32]) -assert_type(np.ma.filled([[1,2,3]]), NDArray[Any]) +assert_type(np.ma.filled(MAR_f4, float("nan")), 
NDArray[np.float32]) +assert_type(np.ma.filled([[1, 2, 3]]), NDArray[Any]) # PyRight detects this one correctly, but mypy doesn't. # https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] @@ -313,15 +345,15 @@ assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) -assert_type(MAR_1d.ravel(order='A'), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) +assert_type(MAR_1d.ravel(order="A"), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) -assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) +assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | _NoMaskType) # PyRight detects this one correctly, but mypy doesn't: # `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] -assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | np.bool) -assert_type(np.ma.getmask([1,2]), NDArray[np.bool] | np.bool) -assert_type(np.ma.getmask(np.int64(1)), np.bool) +assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | _NoMaskType) +assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | _NoMaskType) +assert_type(np.ma.getmask(np.int64(1)), _NoMaskType) assert_type(np.ma.is_mask(MAR_1d), bool) assert_type(np.ma.is_mask(AR_b), bool) @@ -340,11 +372,20 @@ assert_type(MAR_c16.imag, MaskedArray[np.float64]) assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) -assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) +assert_type(MAR_2d_f4.swapaxes(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[0, 0], Any) +assert_type(MAR_2d_f4[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[..., -1], MaskedArray[np.float32]) +assert_type(MAR_2d_V["field_0"], np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_V[["field_0", "field_1"]], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) assert_type(np.ma.nomask, np.bool[Literal[False]]) -# https://github.com/python/mypy/issues/18974 -assert_type(np.ma.MaskType, type[np.bool]) # type: ignore[assert-type] +assert_type(np.ma.MaskType, type[np.bool]) assert_type(MAR_1d.__setmask__([True, False]), None) assert_type(MAR_1d.__setmask__(np.False_), None) @@ -359,6 +400,16 @@ assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) assert_type(MAR_i8.hardmask, bool) assert_type(MAR_i8.sharedmask, bool) +assert_type(MAR_i8.recordmask, np.ma.MaskType | NDArray[np.ma.MaskType]) +assert_type(MAR_2d_f4.recordmask, np.ma.MaskType | np.ndarray[tuple[int, int], np.dtype[np.ma.MaskType]]) + +assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, "float16"), np.ma.MaskedArray[tuple[int, int], np.dtype]) + +assert_type(MAR_i8.fill_value, 
np.int64) + assert_type(MAR_b.transpose(), MaskedArray[np.bool]) assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) @@ -366,5 +417,680 @@ assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.d assert_type(MAR_b.T, MaskedArray[np.bool]) assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) -assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) +assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) + +assert_type(MAR_f8.trace(), Any) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclassC) + +assert_type(MAR_f8.round(), MaskedArray[np.float64]) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) +assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) +assert_type(MAR_c8.reshape(2, 3, 4, 5), np.ma.MaskedArray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(MAR_td64.reshape(()), np.ma.MaskedArray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) +assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) + +assert_type(MAR_f8.cumprod(), MaskedArray[Any]) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.cumsum(), MaskedArray[Any]) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.view(), MaskedArray[np.float64]) +assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.dtype(np.float32)), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.float32, fill_value=0), MaskedArray[np.float32]) +assert_type(MAR_f8.view(type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(None, np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype="float32"), MaskedArray[Any]) +assert_type(MAR_f8.view(dtype="float32", type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_2d_f4.view(dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) +assert_type(MAR_2d_f4.view(dtype=np.dtype(np.float16)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) + +assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) + +assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(axis=0, kind="heap", order=("x", "y")), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) + +assert_type(MAR_2d_f4.flat, np.ma.core.MaskedIterator[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat.ma, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[0, 
0], Any) +assert_type(MAR_2d_f4.flat[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[..., -1], MaskedArray[np.float32]) + +def invalid_resize() -> None: + assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] + +assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), [True, True, False], np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), copy=True), MaskedArray[Any]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` +assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` +assert_type(np.ma.MaskedArray(MAR_into_subclass), MaskedArray[np.float32]) + +# Masked Array addition + +assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b + AR_LIKE_o, Any) + +assert_type(AR_LIKE_u + MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i + MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_b, Any) + +assert_type(MAR_u4 + AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 + AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u + MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i + MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_u4, Any) + +assert_type(MAR_i8 + AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 + AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_i8, Any) + +assert_type(MAR_f8 + AR_LIKE_b, 
MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c + MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o + MAR_f8, Any) + +assert_type(MAR_c16 + AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o + MAR_c16, Any) + +assert_type(MAR_td64 + AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_td64, Any) + +assert_type(MAR_dt64 + AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_o + MAR_dt64, Any) + +assert_type(MAR_o + AR_LIKE_b, Any) +assert_type(MAR_o + AR_LIKE_u, Any) +assert_type(MAR_o + AR_LIKE_i, Any) +assert_type(MAR_o + AR_LIKE_f, Any) +assert_type(MAR_o + AR_LIKE_c, Any) +assert_type(MAR_o + AR_LIKE_td64, Any) +assert_type(MAR_o + AR_LIKE_dt64, Any) +assert_type(MAR_o + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_o, Any) +assert_type(AR_LIKE_u + MAR_o, Any) +assert_type(AR_LIKE_i + MAR_o, Any) +assert_type(AR_LIKE_f + MAR_o, Any) +assert_type(AR_LIKE_c + MAR_o, Any) +assert_type(AR_LIKE_td64 + MAR_o, Any) +assert_type(AR_LIKE_dt64 + MAR_o, Any) +assert_type(AR_LIKE_o + MAR_o, Any) + +# Masked Array subtraction +# Keep in sync with numpy/typing/tests/data/reveal/arithmetic.pyi + +assert_type(MAR_number - AR_number, MaskedArray[np.number]) + +assert_type(MAR_b - AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b - AR_LIKE_o, Any) + +assert_type(AR_LIKE_u - MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i - MAR_b, 
MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_b, Any) + +assert_type(MAR_u4 - AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 - AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u - MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_u4, Any) + +assert_type(MAR_i8 - AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 - AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_i8, Any) + +assert_type(MAR_f8 - AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c - MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o - MAR_f8, Any) + +assert_type(MAR_c16 - AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o - MAR_c16, Any) + +assert_type(MAR_td64 - AR_LIKE_b, MaskedArray[np.timedelta64]) 
+assert_type(MAR_td64 - AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_td64, Any) + +assert_type(MAR_dt64 - AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_dt64, MaskedArray[np.timedelta64]) +assert_type(MAR_dt64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_dt64 - MAR_dt64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o - MAR_dt64, Any) + +assert_type(MAR_o - AR_LIKE_b, Any) +assert_type(MAR_o - AR_LIKE_u, Any) +assert_type(MAR_o - AR_LIKE_i, Any) +assert_type(MAR_o - AR_LIKE_f, Any) +assert_type(MAR_o - AR_LIKE_c, Any) +assert_type(MAR_o - AR_LIKE_td64, Any) +assert_type(MAR_o - AR_LIKE_dt64, Any) +assert_type(MAR_o - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_o, Any) +assert_type(AR_LIKE_u - MAR_o, Any) +assert_type(AR_LIKE_i - MAR_o, Any) +assert_type(AR_LIKE_f - MAR_o, Any) +assert_type(AR_LIKE_c - MAR_o, Any) +assert_type(AR_LIKE_td64 - MAR_o, Any) +assert_type(AR_LIKE_dt64 - MAR_o, Any) +assert_type(AR_LIKE_o - MAR_o, Any) + +# Masked Array multiplication + +assert_type(MAR_b * AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_u * MAR_b, MaskedArray[np.uint32]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_b, MaskedArray[np.signedinteger]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_b, MaskedArray[np.floating]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_b, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_b, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_b, Any) # type: ignore[assert-type] + +assert_type(MAR_u4 * AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 * AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 * AR_LIKE_o, Any) + +assert_type(MAR_i8 * AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 * AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 * AR_LIKE_o, Any) + +assert_type(MAR_f8 * AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 * 
AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_f8, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_f8, Any) # type: ignore[assert-type] + +assert_type(MAR_c16 * AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_c16, Any) # type: ignore[assert-type] + +assert_type(MAR_td64 * AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_td64, MaskedArray[np.datetime64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_td64, Any) # type: ignore[assert-type] + +assert_type(AR_LIKE_o * MAR_dt64, Any) # type: ignore[assert-type] + +assert_type(MAR_o * AR_LIKE_b, Any) +assert_type(MAR_o * AR_LIKE_u, Any) +assert_type(MAR_o * AR_LIKE_i, Any) +assert_type(MAR_o * AR_LIKE_f, Any) +assert_type(MAR_o * AR_LIKE_c, Any) +assert_type(MAR_o * AR_LIKE_td64, Any) +assert_type(MAR_o * AR_LIKE_dt64, Any) +assert_type(MAR_o * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] + 
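+# The ``out=`` cases below rely on NumPy's convention that a reduction given
+# an explicit ``out`` argument returns that same array, so the stubs propagate
+# the type of ``out`` itself. A minimal runtime sketch, assuming a 0-d masked
+# array as the ``out`` target (names illustrative):
+#
+#     out = np.ma.masked_array(0.0)
+#     res = MAR_f8.sum(out=out)  # the result is written into ``out``
+#     assert res is out          # hence the revealed type is ``type(out)``
+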
+assert_type(MAR_f8.sum(), Any) +assert_type(MAR_f8.sum(axis=0), Any) +assert_type(MAR_f8.sum(keepdims=True), Any) +assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.std(), Any) +assert_type(MAR_f8.std(axis=0), Any) +assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.var(), Any) +assert_type(MAR_f8.var(axis=0), Any) +assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.mean(), Any) +assert_type(MAR_f8.mean(axis=0), Any) +assert_type(MAR_f8.mean(keepdims=True), Any) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.prod(), Any) +assert_type(MAR_f8.prod(axis=0), Any) +assert_type(MAR_f8.prod(keepdims=True), Any) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclassC) + +# MaskedArray "true" division + +assert_type(MAR_f8 / b, MaskedArray[np.float64]) +assert_type(MAR_f8 / i, MaskedArray[np.float64]) +assert_type(MAR_f8 / f, MaskedArray[np.float64]) + +assert_type(b / MAR_f8, MaskedArray[np.float64]) +assert_type(i / MAR_f8, MaskedArray[np.float64]) +assert_type(f / MAR_f8, MaskedArray[np.float64]) + +assert_type(MAR_b / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_b, Any) + +assert_type(MAR_u4 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_u4, Any) + +assert_type(MAR_i8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_i8, Any) + +assert_type(MAR_f8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 
/ MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_f8, Any) + +assert_type(MAR_td64 / AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_td64, MaskedArray[np.float64]) +assert_type(MAR_td64 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 / MAR_td64, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_td64, Any) + +assert_type(MAR_o / AR_LIKE_b, Any) +assert_type(MAR_o / AR_LIKE_u, Any) +assert_type(MAR_o / AR_LIKE_i, Any) +assert_type(MAR_o / AR_LIKE_f, Any) +assert_type(MAR_o / AR_LIKE_td64, Any) +assert_type(MAR_o / AR_LIKE_dt64, Any) +assert_type(MAR_o / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_o, Any) +assert_type(AR_LIKE_u / MAR_o, Any) +assert_type(AR_LIKE_i / MAR_o, Any) +assert_type(AR_LIKE_f / MAR_o, Any) +assert_type(AR_LIKE_td64 / MAR_o, Any) +assert_type(AR_LIKE_dt64 / MAR_o, Any) +assert_type(AR_LIKE_o / MAR_o, Any) + +# MaskedArray floor division + +assert_type(MAR_b // AR_LIKE_b, MaskedArray[np.int8]) +assert_type(MAR_b // AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_b, MaskedArray[np.int8]) +assert_type(AR_LIKE_u // MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i // MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_o // MAR_b, Any) + +assert_type(MAR_u4 // AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 // AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u // MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_u4, Any) + +assert_type(MAR_i8 // AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 // AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_i8, Any) + +assert_type(MAR_f8 // AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 // MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_f8, Any) + +assert_type(MAR_td64 // AR_LIKE_u, 
MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_td64, MaskedArray[np.int64]) +assert_type(MAR_td64 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 // MAR_td64, MaskedArray[np.int64]) +assert_type(AR_LIKE_o // MAR_td64, Any) + +assert_type(MAR_o // AR_LIKE_b, Any) +assert_type(MAR_o // AR_LIKE_u, Any) +assert_type(MAR_o // AR_LIKE_i, Any) +assert_type(MAR_o // AR_LIKE_f, Any) +assert_type(MAR_o // AR_LIKE_td64, Any) +assert_type(MAR_o // AR_LIKE_dt64, Any) +assert_type(MAR_o // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_o, Any) +assert_type(AR_LIKE_u // MAR_o, Any) +assert_type(AR_LIKE_i // MAR_o, Any) +assert_type(AR_LIKE_f // MAR_o, Any) +assert_type(AR_LIKE_td64 // MAR_o, Any) +assert_type(AR_LIKE_dt64 // MAR_o, Any) +assert_type(AR_LIKE_o // MAR_o, Any) + +# MaskedArray power + +assert_type(MAR_b ** AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_u ** MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i ** MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_b, Any) + +assert_type(MAR_u4 ** AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 ** AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4, Any) + +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8, Any) + +assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c ** MAR_f8, 
MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_f8, Any) + +assert_type(MAR_c16 ** AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o ** MAR_c16, Any) + +assert_type(MAR_o ** AR_LIKE_b, Any) +assert_type(MAR_o ** AR_LIKE_u, Any) +assert_type(MAR_o ** AR_LIKE_i, Any) +assert_type(MAR_o ** AR_LIKE_f, Any) +assert_type(MAR_o ** AR_LIKE_c, Any) +assert_type(MAR_o ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_o, Any) +assert_type(AR_LIKE_u ** MAR_o, Any) +assert_type(AR_LIKE_i ** MAR_o, Any) +assert_type(AR_LIKE_f ** MAR_o, Any) +assert_type(AR_LIKE_c ** MAR_o, Any) +assert_type(AR_LIKE_o ** MAR_o, Any) diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi index 1a7285d428cc..b76760d547b9 100644 --- a/numpy/typing/tests/data/reveal/matrix.pyi +++ b/numpy/typing/tests/data/reveal/matrix.pyi @@ -1,19 +1,19 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_Shape2D: TypeAlias = tuple[int, int] +type _Shape2D = tuple[int, int] mat: np.matrix[_Shape2D, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] ar_ip: npt.NDArray[np.intp] -assert_type(mat * 5, np.matrix[_Shape2D, Any]) -assert_type(5 * mat, np.matrix[_Shape2D, Any]) +assert_type(mat * 5, np.matrix) +assert_type(5 * mat, np.matrix) mat *= 5 -assert_type(mat**5, np.matrix[_Shape2D, Any]) +assert_type(mat**5, np.matrix) mat **= 5 assert_type(mat.sum(), Any) @@ -29,11 +29,11 @@ assert_type(mat.argmax(), np.intp) assert_type(mat.argmin(), np.intp) assert_type(mat.ptp(), np.int64) -assert_type(mat.sum(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.mean(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.std(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.var(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.prod(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.sum(axis=0), np.matrix) +assert_type(mat.mean(axis=0), np.matrix) +assert_type(mat.std(axis=0), np.matrix) +assert_type(mat.var(axis=0), np.matrix) +assert_type(mat.prod(axis=0), np.matrix) assert_type(mat.any(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) assert_type(mat.all(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) assert_type(mat.max(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) @@ -56,18 +56,18 @@ assert_type(mat.argmin(out=ar_ip), npt.NDArray[np.intp]) assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(mat.I, np.matrix[_Shape2D, Any]) +assert_type(mat.I, np.matrix) assert_type(mat.A, np.ndarray[_Shape2D, np.dtype[np.int64]]) assert_type(mat.A1, npt.NDArray[np.int64]) assert_type(mat.H, np.matrix[_Shape2D, np.dtype[np.int64]]) assert_type(mat.getT(), np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(mat.getI(), np.matrix[_Shape2D, Any]) +assert_type(mat.getI(), np.matrix) assert_type(mat.getA(), np.ndarray[_Shape2D, 
np.dtype[np.int64]]) assert_type(mat.getA1(), npt.NDArray[np.int64]) assert_type(mat.getH(), np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(np.bmat(ar_f8), np.matrix[_Shape2D, Any]) -assert_type(np.bmat([[0, 1, 2]]), np.matrix[_Shape2D, Any]) -assert_type(np.bmat("mat"), np.matrix[_Shape2D, Any]) +assert_type(np.bmat(ar_f8), np.matrix) +assert_type(np.bmat([[0, 1, 2]]), np.matrix) +assert_type(np.bmat("mat"), np.matrix) -assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[_Shape2D, Any]) +assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 59a6a1016479..131e9259b6b5 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,10 +1,8 @@ import datetime as dt -from typing import Literal as L -from typing import assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _64Bit f8: np.float64 i8: np.int64 @@ -29,13 +27,16 @@ f: float AR_b: npt.NDArray[np.bool] AR_m: npt.NDArray[np.timedelta64] +# NOTE: the __divmod__ calls are workarounds for https://github.com/microsoft/pyright/issues/9663 + # Time structures assert_type(m % m, np.timedelta64) assert_type(m % m_nat, np.timedelta64[None]) assert_type(m % m_int0, np.timedelta64[None]) assert_type(m % m_int, np.timedelta64[int | None]) -assert_type(m_nat % m, np.timedelta64[None]) +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. +assert_type(m_nat % m, np.timedelta64[None]) # type: ignore[assert-type] assert_type(m_int % m_nat, np.timedelta64[None]) assert_type(m_int % m_int0, np.timedelta64[None]) assert_type(m_int % m_int, np.timedelta64[int | None]) @@ -48,20 +49,22 @@ assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) assert_type(AR_m % m, npt.NDArray[np.timedelta64]) assert_type(m % AR_m, npt.NDArray[np.timedelta64]) -assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) -assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 +# +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. +assert_type(m.__divmod__(m), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] +assert_type(m.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_nat, m), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. 
+assert_type(m_nat.__divmod__(m), tuple[np.int64, np.timedelta64[None]]) # type: ignore[assert-type] +assert_type(m_int.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_int.__divmod__(m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) @@ -79,7 +82,6 @@ assert_type(b_ % AR_b, npt.NDArray[np.int8]) assert_type(divmod(b_, b), tuple[np.int8, np.int8]) assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) @@ -109,51 +111,49 @@ assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) assert_type(i8 % b, np.int64) assert_type(i8 % i8, np.int64) -assert_type(i8 % f, np.float64 | np.floating[_64Bit]) -assert_type(i8 % f8, np.float64 | np.floating[_64Bit]) -assert_type(i4 % i8, np.int64 | np.int32) -assert_type(i4 % f8, np.float64 | np.float32) +assert_type(i8 % f, np.float64) +assert_type(i8 % f8, np.float64) +assert_type(i4 % i8, np.signedinteger) +assert_type(i4 % f8, np.float64) assert_type(i4 % i4, np.int32) -assert_type(i4 % f4, np.float32) +assert_type(i4 % f4, np.floating) assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) -assert_type(divmod(i8, i4), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 -assert_type(i8.__divmod__(f), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) -assert_type(i8.__divmod__(f8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) -assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) +assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(i4, f4), tuple[np.floating, np.floating]) assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) assert_type(b % i8, np.int64) -assert_type(f % i8, np.float64 | np.floating[_64Bit]) +assert_type(f % i8, np.float64) assert_type(i8 % i8, np.int64) assert_type(f8 % i8, np.float64) -assert_type(i8 % i4, np.int64 | np.int32) +assert_type(i8 % i4, np.signedinteger) assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) -assert_type(f4 % i4, np.float32) +assert_type(f4 % i4, np.floating) assert_type(AR_b % i8, npt.NDArray[np.int64]) 
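+# NOTE: With mixed-width operands the expected return type is now the abstract
+# parent type (e.g. `np.signedinteger`, `np.floating`) rather than a union of
+# the concrete candidate widths, reflecting the simplified promotion rules in
+# the updated stubs.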
assert_type(divmod(b, i8), tuple[np.int64, np.int64]) -assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(divmod(f, i8), tuple[np.float64, np.float64]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) -assert_type(divmod(i4, i8), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 -assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) -assert_type(f4.__divmod__(i4), tuple[np.float32, np.float32]) +assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) +assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) # float assert_type(f8 % b, np.float64) assert_type(f8 % f, np.float64) -assert_type(i8 % f4, np.floating[_64Bit] | np.float32) +assert_type(i8 % f4, np.floating) assert_type(f4 % f4, np.float32) assert_type(f8 % AR_b, npt.NDArray[np.float64]) @@ -174,7 +174,6 @@ assert_type(AR_b % f8, npt.NDArray[np.float64]) assert_type(divmod(b, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 6ba3fcde632f..ada8f7777696 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,12 +1,10 @@ import datetime as dt -from typing import Any, Literal, TypeVar, assert_type +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) - -class SubClass(npt.NDArray[_ScalarT_co]): ... +class SubClass[ScalarT: np.generic](np.ndarray[tuple[Any, ...], np.dtype[ScalarT]]): ... 
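+# NOTE: A PEP 695 sketch equivalent to the removed TypeVar-based declaration;
+# the explicit `np.ndarray[tuple[Any, ...], np.dtype[ScalarT]]` base keeps the
+# subclass generic over the scalar type while leaving the shape unconstrained.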
subclass: SubClass[np.float64] @@ -66,7 +64,7 @@ assert_type(np.inner(AR_f8, AR_i8), Any) assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) -assert_type(np.lexsort([0, 1, 2]), Any) +assert_type(np.lexsort([0, 1, 2]), npt.NDArray[np.intp]) assert_type(np.can_cast(np.dtype("i8"), int), bool) assert_type(np.can_cast(AR_f8, "f8"), bool) @@ -94,16 +92,19 @@ assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) assert_type(np.putmask(AR_f8, [True, True, False], 1.5), None) -assert_type(np.packbits(AR_i8), npt.NDArray[np.uint8]) -assert_type(np.packbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_i8), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_i8, axis=1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_u1, axis=1), npt.NDArray[np.uint8]) -assert_type(np.unpackbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.unpackbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.unpackbits(AR_u1, axis=1), npt.NDArray[np.uint8]) assert_type(np.shares_memory(1, 2), bool) -assert_type(np.shares_memory(AR_f8, AR_f8, max_work=1), bool) +assert_type(np.shares_memory(AR_f8, AR_f8, max_work=-1), bool) assert_type(np.may_share_memory(1, 2), bool) -assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=1), bool) +assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=0), bool) assert_type(np.promote_types(np.int32, np.int64), np.dtype) assert_type(np.promote_types("f4", float), np.dtype) @@ -167,9 +168,10 @@ assert_type(np.busday_count("2011-01", "2011-02"), np.int_) assert_type(np.busday_count(["2011-01"], "2011-02"), npt.NDArray[np.int_]) assert_type(np.busday_count(["2011-01"], date_scalar), npt.NDArray[np.int_]) -assert_type(np.busday_offset(M, m), np.datetime64) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.busday_offset(M, m), np.datetime64) # type: ignore[assert-type] +assert_type(np.busday_offset(M, 5), np.datetime64) # type: ignore[assert-type] assert_type(np.busday_offset(date_scalar, m), np.datetime64) -assert_type(np.busday_offset(M, 5), np.datetime64) assert_type(np.busday_offset(AR_M, m), npt.NDArray[np.datetime64]) assert_type(np.busday_offset(M, timedelta_seq), npt.NDArray[np.datetime64]) assert_type(np.busday_offset("2011-01", "2011-02", roll="forward"), np.datetime64) @@ -179,7 +181,8 @@ assert_type(np.is_busday("2012"), np.bool) assert_type(np.is_busday(date_scalar), np.bool) assert_type(np.is_busday(["2012"]), npt.NDArray[np.bool]) -assert_type(np.datetime_as_string(M), np.str_) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. 
+assert_type(np.datetime_as_string(M), np.str_) # type: ignore[assert-type] assert_type(np.datetime_as_string(AR_M), npt.NDArray[np.str_]) assert_type(np.busdaycalendar(holidays=date_seq), np.busdaycalendar) diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index 33229660b6f8..c6e931eaca84 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -1,14 +1,10 @@ -from typing import TypeVar, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt from numpy._typing import _32Bit, _64Bit -T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] - -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: - return a + b +def add[T1: npt.NBitBase, T2: npt.NBitBase](a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... # type: ignore[deprecated] i8: np.int64 i4: np.int32 diff --git a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi index d754a94003d3..e8ccc573d642 100644 --- a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -1,38 +1,36 @@ -from typing import Protocol, TypeAlias, TypeVar, assert_type +from typing import Any, Protocol, assert_type import numpy as np from numpy._typing import _64Bit -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) +class CanAbs[T](Protocol): + def __abs__(self, /) -> T: ... -class CanAbs(Protocol[_T_co]): - def __abs__(self, /) -> _T_co: ... +class CanInvert[T](Protocol): + def __invert__(self, /) -> T: ... -class CanInvert(Protocol[_T_co]): - def __invert__(self, /) -> _T_co: ... +class CanNeg[T](Protocol): + def __neg__(self, /) -> T: ... -class CanNeg(Protocol[_T_co]): - def __neg__(self, /) -> _T_co: ... +class CanPos[T](Protocol): + def __pos__(self, /) -> T: ... -class CanPos(Protocol[_T_co]): - def __pos__(self, /) -> _T_co: ... +def do_abs[T](x: CanAbs[T]) -> T: ... +def do_invert[T](x: CanInvert[T]) -> T: ... +def do_neg[T](x: CanNeg[T]) -> T: ... +def do_pos[T](x: CanPos[T]) -> T: ... -def do_abs(x: CanAbs[_T]) -> _T: ... -def do_invert(x: CanInvert[_T]) -> _T: ... -def do_neg(x: CanNeg[_T]) -> _T: ... -def do_pos(x: CanPos[_T]) -> _T: ... 
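+# NOTE: The `do_*` helpers above exercise structural assignability: passing an
+# ndarray resolves `T` to the array's own type, which the `assert_type` checks
+# at the bottom of this file rely on.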
- -_Bool_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.bool]] -_UInt8_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.uint8]] -_Int16_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.int16]] -_LongLong_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longlong]] -_Float32_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float32]] -_Float64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] -_LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] -_Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] -_Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] -_CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +type _Bool_1d = np.ndarray[tuple[int], np.dtype[np.bool]] +type _UInt8_1d = np.ndarray[tuple[int], np.dtype[np.uint8]] +type _Int16_1d = np.ndarray[tuple[int], np.dtype[np.int16]] +type _LongLong_1d = np.ndarray[tuple[int], np.dtype[np.longlong]] +type _Float32_1d = np.ndarray[tuple[int], np.dtype[np.float32]] +type _Float64_1d = np.ndarray[tuple[int], np.dtype[np.float64]] +type _LongDouble_1d = np.ndarray[tuple[int], np.dtype[np.longdouble]] +type _Complex64_1d = np.ndarray[tuple[int], np.dtype[np.complex64]] +type _Complex128_1d = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _CLongDouble_1d = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +type _Void_1d = np.ndarray[tuple[int], np.dtype[np.void]] b1_1d: _Bool_1d u1_1d: _UInt8_1d @@ -44,6 +42,7 @@ g_1d: _LongDouble_1d c8_1d: _Complex64_1d c16_1d: _Complex128_1d G_1d: _CLongDouble_1d +V_1d: _Void_1d assert_type(do_abs(b1_1d), _Bool_1d) assert_type(do_abs(u1_1d), _UInt8_1d) @@ -75,3 +74,6 @@ assert_type(do_pos(i2_1d), _Int16_1d) assert_type(do_pos(q_1d), _LongLong_1d) assert_type(do_pos(f4_1d), _Float32_1d) assert_type(do_pos(c16_1d), _Complex128_1d) + +# this shape is effectively equivalent to `tuple[int, *tuple[Any, ...]]`, i.e. ndim >= 1 +assert_type(V_1d["field"], np.ndarray[tuple[int] | tuple[Any, ...]]) diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index bbd42573a774..2af616440c5e 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -37,8 +37,6 @@ any_sctype: np.ndarray[Any, Any] assert_type(any_dtype.tolist(), Any) assert_type(any_sctype.tolist(), Any) - -# itemset does not return a value # tobytes is pretty simple # tofile does not return a value # dump does not return a value @@ -73,7 +71,7 @@ assert_type(i0_nd.copy("C"), npt.NDArray[np.int_]) assert_type(i0_nd.view(), npt.NDArray[np.int_]) assert_type(i0_nd.view(np.float64), npt.NDArray[np.float64]) assert_type(i0_nd.view(float), npt.NDArray[Any]) -assert_type(i0_nd.view(np.float64, np.matrix), np.matrix[tuple[int, int], Any]) +assert_type(i0_nd.view(np.float64, np.matrix), np.matrix) # getfield assert_type(i0_nd.getfield("float"), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 465ce7679b49..be0666f95fcb 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -8,15 +8,15 @@ function-based counterpart in `../from_numeric.py`. 
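+# NOTE: `collections.abc.Iterator` backs the new `iter(AR_f8_*)` assertions at
+# the end of this file, which check the per-dimension element types.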
import ctypes as ct import operator +from collections.abc import Iterator from types import ModuleType from typing import Any, Literal, assert_type - from typing_extensions import CapsuleType import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.object_]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.object_]]): ... f8: np.float64 i8: np.int64 @@ -29,6 +29,10 @@ AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] AR_V: npt.NDArray[np.void] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] + ctypes_obj = AR_f8.ctypes assert_type(AR_f8.__dlpack__(), CapsuleType) @@ -58,15 +62,15 @@ assert_type(AR_f8.any(out=B), SubClass) assert_type(f8.argmax(), np.intp) assert_type(AR_f8.argmax(), np.intp) assert_type(AR_f8.argmax(axis=0), Any) -assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.intp]) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.int64]) assert_type(f8.argmin(), np.intp) assert_type(AR_f8.argmin(), np.intp) assert_type(AR_f8.argmin(axis=0), Any) -assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.int64]) -assert_type(f8.argsort(), npt.NDArray[Any]) -assert_type(AR_f8.argsort(), npt.NDArray[Any]) +assert_type(f8.argsort(), npt.NDArray[np.intp]) +assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) assert_type(f8.astype(np.int64).choose([()]), npt.NDArray[Any]) assert_type(AR_f8.choose([0]), npt.NDArray[Any]) @@ -165,7 +169,7 @@ assert_type(AR_f8.dot(1), npt.NDArray[Any]) assert_type(AR_f8.dot([1]), Any) assert_type(AR_f8.dot(1, out=B), SubClass) -assert_type(AR_f8.nonzero(), tuple[npt.NDArray[np.intp], ...]) +assert_type(AR_f8.nonzero(), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(AR_f8.searchsorted(1), np.intp) assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) @@ -235,3 +239,8 @@ assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) assert_type(f8.__array_namespace__(), ModuleType) assert_type(AR_f8.__array_namespace__(), ModuleType) + +assert_type(iter(AR_f8), Iterator[Any]) # any-D +assert_type(iter(AR_f8_1d), Iterator[np.float64]) # 1-D +assert_type(iter(AR_f8_2d), Iterator[npt.NDArray[np.float64]]) # 2-D +assert_type(iter(AR_f8_3d), Iterator[npt.NDArray[np.float64]]) # 3-D diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index 4447bb13d2ad..6bbe057ff5b7 100644 --- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -3,37 +3,45 @@ from typing import assert_type import numpy as np import numpy.typing as npt -nd: npt.NDArray[np.int64] +type _ArrayND = npt.NDArray[np.int64] +type _Array2D = np.ndarray[tuple[int, int], np.dtype[np.int8]] +type _Array3D = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] + +_nd: _ArrayND +_2d: _Array2D +_3d: _Array3D # reshape -assert_type(nd.reshape(None), npt.NDArray[np.int64]) -assert_type(nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(None), npt.NDArray[np.int64]) +assert_type(_nd.reshape(4), np.ndarray[tuple[int], 
np.dtype[np.int64]]) +assert_type(_nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # resize does not return a value # transpose -assert_type(nd.transpose(), npt.NDArray[np.int64]) -assert_type(nd.transpose(1, 0), npt.NDArray[np.int64]) -assert_type(nd.transpose((1, 0)), npt.NDArray[np.int64]) +assert_type(_nd.transpose(), npt.NDArray[np.int64]) +assert_type(_nd.transpose(1, 0), npt.NDArray[np.int64]) +assert_type(_nd.transpose((1, 0)), npt.NDArray[np.int64]) # swapaxes -assert_type(nd.swapaxes(0, 1), npt.NDArray[np.int64]) +assert_type(_nd.swapaxes(0, 1), _ArrayND) +assert_type(_2d.swapaxes(0, 1), _Array2D) +assert_type(_3d.swapaxes(0, 1), _Array3D) # flatten -assert_type(nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # ravel -assert_type(nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # squeeze -assert_type(nd.squeeze(), npt.NDArray[np.int64]) -assert_type(nd.squeeze(0), npt.NDArray[np.int64]) -assert_type(nd.squeeze((0, 2)), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(0), npt.NDArray[np.int64]) +assert_type(_nd.squeeze((0, 2)), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/npyio.pyi b/numpy/typing/tests/data/reveal/npyio.pyi index 40da72c8544e..e3eaa45a5fa1 100644 --- a/numpy/typing/tests/data/reveal/npyio.pyi +++ b/numpy/typing/tests/data/reveal/npyio.pyi @@ -28,7 +28,7 @@ class BytesReader: bytes_writer: BytesWriter bytes_reader: BytesReader -assert_type(npz_file.zip, zipfile.ZipFile) +assert_type(npz_file.zip, zipfile.ZipFile | None) assert_type(npz_file.fid, IO[str] | None) assert_type(npz_file.files, list[str]) assert_type(npz_file.allow_pickle, bool) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 7c1ea8958e3b..7b3abc2d6761 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -10,7 +10,7 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.int64]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.int64]]): ... 
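+# NOTE: As in the other reveal tests, the base class spells out
+# `np.ndarray[tuple[Any, ...], np.dtype[np.int64]]` instead of the
+# `npt.NDArray[np.int64]` alias, making the shape parameter explicit.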
i8: np.int64 @@ -22,89 +22,127 @@ AR_c16: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_O: npt.NDArray[np.object_] -B: list[int] -C: SubClass +_sub_nd_i8: SubClass + +_to_1d_bool: list[bool] +_to_1d_int: list[int] +_to_1d_float: list[float] +_to_1d_complex: list[complex] + +### assert_type(np.count_nonzero(i8), np.intp) assert_type(np.count_nonzero(AR_i8), np.intp) -assert_type(np.count_nonzero(B), np.intp) +assert_type(np.count_nonzero(_to_1d_int), np.intp) assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) assert_type(np.count_nonzero(AR_i8, axis=0), Any) assert_type(np.isfortran(i8), bool) assert_type(np.isfortran(AR_i8), bool) -assert_type(np.argwhere(i8), npt.NDArray[np.intp]) -assert_type(np.argwhere(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.flatnonzero(i8), npt.NDArray[np.intp]) -assert_type(np.flatnonzero(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.correlate(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) -assert_type(np.correlate(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) -assert_type(np.correlate(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.correlate(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.correlate(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.correlate(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.correlate(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.correlate(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.correlate(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.convolve(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.convolve(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.convolve(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.convolve(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.convolve(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.convolve(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.outer(i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.outer(B, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.outer(AR_i8, AR_i8, out=C), SubClass) -assert_type(np.outer(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.outer(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.outer(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.signedinteger]) +assert_type(np.argwhere(i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argwhere(AR_i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) + +assert_type(np.flatnonzero(i8), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.flatnonzero(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) + +# NOTE: Mypy incorrectly 
infers `np.ndarray[Any, Any]` for timedelta64 + +# correlate +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.correlate(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.correlate(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] +assert_type(np.correlate(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.correlate(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.correlate(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.correlate(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# convolve (same as correlate) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.convolve(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.convolve(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] +assert_type(np.convolve(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.convolve(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.convolve(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.convolve(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# outer (very similar to above, but 2D output) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_b, AR_b), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(AR_u8, AR_u8), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.outer(AR_f8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(AR_c16, AR_c16), 
np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.outer(AR_c16, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] +assert_type(np.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.outer(AR_i8, AR_i8, out=_sub_nd_i8), SubClass) +assert_type(np.outer(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(_to_1d_int, _to_1d_int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.outer(_to_1d_float, _to_1d_float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) + +# tensordot +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.tensordot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.tensordot(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.tensordot(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.tensordot(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.tensordot(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) +assert_type(np.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.tensordot(_to_1d_bool, _to_1d_bool), npt.NDArray[np.bool]) +assert_type(np.tensordot(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.tensordot(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) + +# cross +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.cross(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.cross(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) +assert_type(np.cross(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.cross(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.cross(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) assert_type(np.isscalar(i8), bool) assert_type(np.isscalar(AR_i8), bool) -assert_type(np.isscalar(B), bool) +assert_type(np.isscalar(_to_1d_int), bool) assert_type(np.roll(AR_i8, 1), npt.NDArray[np.int64]) assert_type(np.roll(AR_i8, (1, 2)), npt.NDArray[np.int64]) -assert_type(np.roll(B, 1), npt.NDArray[Any]) +assert_type(np.roll(_to_1d_int, 1), npt.NDArray[Any]) assert_type(np.rollaxis(AR_i8, 0, 1), 
npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, (0, 1), (1, 2)), npt.NDArray[np.int64]) -assert_type(np.cross(B, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.cross(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.cross(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.cross(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.cross(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) - assert_type(np.indices([0, 1, 2]), npt.NDArray[np.int_]) assert_type(np.indices([0, 1, 2], sparse=True), tuple[npt.NDArray[np.int_], ...]) assert_type(np.indices([0, 1, 2], dtype=np.float64), npt.NDArray[np.float64]) @@ -117,18 +155,18 @@ assert_type(np.binary_repr(1), str) assert_type(np.base_repr(1), str) assert_type(np.allclose(i8, AR_i8), bool) -assert_type(np.allclose(B, AR_i8), bool) +assert_type(np.allclose(_to_1d_int, AR_i8), bool) assert_type(np.allclose(AR_i8, AR_i8), bool) assert_type(np.isclose(i8, i8), np.bool) assert_type(np.isclose(i8, AR_i8), npt.NDArray[np.bool]) -assert_type(np.isclose(B, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.isclose(AR_i8, AR_i8), npt.NDArray[np.bool]) assert_type(np.array_equal(i8, AR_i8), bool) -assert_type(np.array_equal(B, AR_i8), bool) +assert_type(np.array_equal(_to_1d_int, AR_i8), bool) assert_type(np.array_equal(AR_i8, AR_i8), bool) assert_type(np.array_equiv(i8, AR_i8), bool) -assert_type(np.array_equiv(B, AR_i8), bool) +assert_type(np.array_equiv(_to_1d_int, AR_i8), bool) assert_type(np.array_equiv(AR_i8, AR_i8), bool) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 4a3e02c9afa6..75d108ce5a0f 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -2,50 +2,15 @@ from typing import Literal, assert_type import numpy as np -assert_type( - np.ScalarType, - tuple[ - type[int], - type[float], - type[complex], - type[bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[np.csingle], - type[np.cdouble], - type[np.clongdouble], - type[np.half], - type[np.single], - type[np.double], - type[np.longdouble], - type[np.byte], - type[np.short], - type[np.intc], - type[np.long], - type[np.longlong], - type[np.timedelta64], - type[np.datetime64], - type[np.object_], - type[np.bytes_], - type[np.str_], - type[np.ubyte], - type[np.ushort], - type[np.uintc], - type[np.ulong], - type[np.ulonglong], - type[np.void], - ], -) assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) -assert_type(np.ScalarType[8], type[np.csingle]) -assert_type(np.ScalarType[10], type[np.clongdouble]) +assert_type(np.ScalarType[8], type[np.complex64]) +assert_type(np.ScalarType[9], type[np.complex128]) +assert_type(np.ScalarType[-1], type[np.void]) assert_type(np.bool_(object()), np.bool) assert_type(np.typecodes["Character"], Literal["c"]) assert_type(np.typecodes["Complex"], Literal["FDG"]) assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) -assert_type(np.sctypeDict['uint8'], type[np.generic]) +assert_type(np.sctypeDict["uint8"], type[np.generic]) diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 
bb927035e40c..faba91273c91 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,32 +1,29 @@ from collections.abc import Sequence from decimal import Decimal -from fractions import Fraction -from typing import Any, LiteralString, TypeAlias, TypeVar, assert_type -from typing import Literal as L +from typing import Any, Literal as L, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -_Ar_x: TypeAlias = npt.NDArray[np.inexact | np.object_] -_Ar_f: TypeAlias = npt.NDArray[np.floating] -_Ar_c: TypeAlias = npt.NDArray[np.complexfloating] -_Ar_O: TypeAlias = npt.NDArray[np.object_] +type _Ar_x = npt.NDArray[np.inexact | np.object_] +type _Ar_f = npt.NDArray[np.floating] +type _Ar_c = npt.NDArray[np.complexfloating] +type _Ar_O = npt.NDArray[np.object_] -_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] -_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _Ar_x_n = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] +type _Ar_f_n = np.ndarray[tuple[int], np.dtype[np.floating]] +type _Ar_c_n = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _Ar_O_n = np.ndarray[tuple[int], np.dtype[np.object_]] -_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.inexact | np.object_]] -_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] -_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] -_Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] +type _Ar_x_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] +type _Ar_f_2 = np.ndarray[tuple[L[2]], np.dtype[np.floating]] +type _Ar_c_2 = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] +type _Ar_O_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _Ar_1d[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -_BasisName: TypeAlias = L["X"] +type _BasisName = L["X"] SC_i: np.int_ SC_i_co: int | np.int_ @@ -67,11 +64,11 @@ PS_all: ( # static- and classmethods assert_type(type(PS_poly).basis_name, None) -assert_type(type(PS_cheb).basis_name, L['T']) -assert_type(type(PS_herm).basis_name, L['H']) -assert_type(type(PS_herme).basis_name, L['He']) -assert_type(type(PS_lag).basis_name, L['L']) -assert_type(type(PS_leg).basis_name, L['P']) +assert_type(type(PS_cheb).basis_name, L["T"]) +assert_type(type(PS_herm).basis_name, L["H"]) +assert_type(type(PS_herme).basis_name, L["He"]) +assert_type(type(PS_lag).basis_name, L["L"]) +assert_type(type(PS_leg).basis_name, L["P"]) assert_type(type(PS_all).__hash__, None) assert_type(type(PS_all).__array_ufunc__, None) @@ -91,10 +88,10 @@ assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) assert_type(type(PS_poly).identity(), npp.Polynomial) -assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev) +assert_type(type(PS_cheb).identity(symbol="z"), npp.Chebyshev) assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) -assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre) +assert_type(type(PS_leg).basis(32, symbol="u"), npp.Legendre) assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) @@ -104,7 
+101,7 @@ assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) assert_type(PS_all.coef, _Ar_x_n) assert_type(PS_all.domain, _Ar_x_2) assert_type(PS_all.window, _Ar_x_2) -assert_type(PS_all.symbol, LiteralString) +assert_type(PS_all.symbol, str) # instance methods @@ -114,7 +111,7 @@ assert_type(PS_all.has_samewindow(PS_all), bool) assert_type(PS_all.has_sametype(PS_all), bool) assert_type(PS_poly.has_sametype(PS_poly), bool) assert_type(PS_poly.has_sametype(PS_leg), bool) -assert_type(PS_poly.has_sametype(NotADirectoryError), L[False]) +assert_type(PS_poly.has_sametype(NotADirectoryError), bool) assert_type(PS_poly.copy(), npp.Polynomial) assert_type(PS_cheb.copy(), npp.Chebyshev) @@ -123,7 +120,7 @@ assert_type(PS_herme.copy(), npp.HermiteE) assert_type(PS_lag.copy(), npp.Laguerre) assert_type(PS_leg.copy(), npp.Legendre) -assert_type(PS_leg.cutdeg(), npp.Legendre) +assert_type(PS_leg.cutdeg(3), npp.Legendre) assert_type(PS_leg.trim(), npp.Legendre) assert_type(PS_leg.trim(tol=SC_f_co), npp.Legendre) assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) @@ -173,17 +170,16 @@ assert_type(repr(PS_all), str) assert_type(format(PS_all), str) assert_type(len(PS_all), int) -assert_type(next(iter(PS_all)), np.inexact | object) - -assert_type(PS_all(SC_f_co), np.float64 | np.complex128) -assert_type(PS_all(SC_c_co), np.complex128) -assert_type(PS_all(Decimal()), np.float64 | np.complex128) -assert_type(PS_all(Fraction()), np.float64 | np.complex128) -assert_type(PS_poly(SQ_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) -assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(next(iter(PS_all)), np.float64 | Any) + +assert_type(PS_all(SC_f_co), np.float64 | Any) +assert_type(PS_all(SC_c_co), np.complex128 | Any) +assert_type(PS_all(Decimal()), np.float64 | Any) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128 | Any]) assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) -assert_type(PS_poly(AR_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) -assert_type(PS_poly(AR_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(AR_c), npt.NDArray[np.complex128 | Any]) assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) assert_type(PS_all(PS_poly), npp.Polynomial) diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index 45522e72102f..9c5aff1117dc 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,21 +1,20 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Any, TypeAlias, assert_type -from typing import Literal as L +from typing import Literal as L, assert_type import numpy as np import numpy.polynomial.polyutils as pu import numpy.typing as npt from numpy.polynomial._polytypes import _Tuple2 -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrComplex1D = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] -_ArrFloat1D_2: 
TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] -_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] -_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] +type _ArrFloat1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +type _ArrComplex1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +type _ArrObject1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] num_int: int num_float: float diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 93f0799c818d..b87ba4fb2677 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,15 +1,15 @@ from collections.abc import Sequence -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] -_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrFloat1D64 = np.ndarray[tuple[int], np.dtype[np.float64]] +type _ArrComplex1D = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrComplex1D128 = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] AR_b: npt.NDArray[np.bool] AR_u4: npt.NDArray[np.uint32] @@ -79,12 +79,12 @@ assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complex128 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_ | Any]) assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating]) assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating]) diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index e188eb02893f..4d00ef0d99aa 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -349,11 +349,11 @@ assert_type(def_gen.gumbel(0.5, 0.5), float) 
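+# NOTE: When only the second parameter is array-like and `size` is omitted, the
+# scalar-first overloads apparently no longer apply, hence the widened
+# `npt.NDArray[np.float64] | Any` expectations below.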
assert_type(def_gen.gumbel(0.5, 0.5, size=None), float) assert_type(def_gen.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -363,11 +363,11 @@ assert_type(def_gen.laplace(0.5, 0.5), float) assert_type(def_gen.laplace(0.5, 0.5, size=None), float) assert_type(def_gen.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -377,11 +377,11 @@ assert_type(def_gen.logistic(0.5, 0.5), float) assert_type(def_gen.logistic(0.5, 0.5, size=None), float) assert_type(def_gen.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -419,11 +419,11 @@ assert_type(def_gen.normal(0.5, 0.5), float) assert_type(def_gen.normal(0.5, 0.5, size=None), float) assert_type(def_gen.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, 0.5, size=1), 
npt.NDArray[np.float64]) assert_type(def_gen.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -488,14 +488,14 @@ assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), np assert_type(def_gen.hypergeometric(20, 20, 10), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=None), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64] | Any) assert_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64] | Any) assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int64]) @@ -503,8 +503,8 @@ I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64) assert_type(def_gen.integers(0, 100), np.int64) assert_type(def_gen.integers(100), np.int64) -assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) -assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) +assert_type(def_gen.integers([100]), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64] | Any) I_bool_low: npt.NDArray[np.bool] = np.array([0], dtype=np.bool) I_bool_low_like: list[int] = [0] @@ -515,107 +515,59 @@ assert_type(def_gen.integers(2, dtype=bool), bool) assert_type(def_gen.integers(0, 2, dtype=bool), bool) assert_type(def_gen.integers(1, dtype=bool, endpoint=True), bool) assert_type(def_gen.integers(0, 1, dtype=bool, endpoint=True), bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, 
endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) assert_type(def_gen.integers(2, dtype=np.bool), np.bool) assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) I_u1_low: npt.NDArray[np.uint8] = np.array([0], dtype=np.uint8) I_u1_low_like: list[int] = [0] I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) -assert_type(def_gen.integers(256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) 
-assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) - -assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) -assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) -assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) - assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) I_u2_low: npt.NDArray[np.uint16] = np.array([0], dtype=np.uint16) I_u2_low_like: list[int] = [0] I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) -assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) 
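# --- editor's note: hypothetical sketch, not numpy's actual stubs ---
# The deleted asserts here exercised string dtype codes ("u1", "uint8",
# "u2", ...). In stub files such codes can only be matched via Literal
# overloads, one per scalar type, roughly as in the hypothetical
# `_integers` below, so dropping them removes a large overload surface
# while the `dtype=np.uint8`-style asserts still pin every scalar type:

from typing import Literal, overload
import numpy as np

@overload
def _integers(high: int, *, dtype: type[np.uint8]) -> np.uint8: ...
@overload
def _integers(high: int, *, dtype: Literal["u1", "uint8"]) -> np.uint8: ...
@overload
def _integers(high: int, *, dtype: Literal["u2", "uint16"]) -> np.uint16: ...
def _integers(high: int, *, dtype: object) -> object:
    raise NotImplementedError  # sketch only; never meant to run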
-assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) - -assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) -assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) - assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) I_u4_low: npt.NDArray[np.uint32] = np.array([0], dtype=np.uint32) I_u4_low_like: list[int] = [0] @@ 
-626,266 +578,122 @@ assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) - -assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) -assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) - -assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) 
+assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) 
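# --- editor's note: illustrative sketch, not part of the patch ---
# The `npt.NDArray[...] | Any` results around here are unions: the
# NDArray member keeps array operations precisely typed, while the Any
# member records that the stubs could not fully pin down the result for
# array-like arguments. A minimal standalone model of the pattern:

from typing import Any, assert_type
import numpy as np
import numpy.typing as npt

def _soft() -> npt.NDArray[np.uint] | Any:
    return np.zeros(3, dtype=np.uint)

x = _soft()
assert_type(x, npt.NDArray[np.uint] | Any)
x.mean()  # checked per member: valid on ndarray, unconstrained on Any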
+assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) I_u8_low: npt.NDArray[np.uint64] = np.array([0], dtype=np.uint64) I_u8_low_like: list[int] = [0] I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) -assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) - -assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) - assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, 
dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) I_i1_low: npt.NDArray[np.int8] = np.array([-128], dtype=np.int8) I_i1_low_like: list[int] = [-128] I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) -assert_type(def_gen.integers(128, dtype="i1"), np.int8) -assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) -assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) - -assert_type(def_gen.integers(128, dtype="int8"), np.int8) -assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) -assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) - assert_type(def_gen.integers(128, dtype=np.int8), np.int8) assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) 
-assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) I_i2_low: npt.NDArray[np.int16] = np.array([-32768], dtype=np.int16) I_i2_low_like: list[int] = [-32768] I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) -assert_type(def_gen.integers(32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) - -assert_type(def_gen.integers(32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) - assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) 
assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) I_i4_low: npt.NDArray[np.int32] = np.array([-2147483648], dtype=np.int32) I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) -assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) -assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) - -assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, 
I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) - assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) I_i8_low: npt.NDArray[np.int64] = np.array([-9223372036854775808], dtype=np.int64) I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) -assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", 
endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) - -assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) - assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) assert_type(def_gen.bit_generator, np.random.BitGenerator) @@ -897,11 +705,12 @@ assert_type(def_gen.choice(5, 3, replace=True), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int64]) 
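# --- editor's note: illustrative sketch, not part of the patch ---
# The rewrite below swaps the inline "pooh" list literals for a single
# `str_list: list[str]` variable; either way the element type of a
# plain sequence is not tracked by the stubs, so a single draw reveals
# Any and sized draws reveal npt.NDArray[Any]:

from typing import Any, assert_type
import numpy as np
import numpy.typing as npt

rng = np.random.default_rng()
names: list[str] = ["pooh", "rabbit", "piglet", "Christopher"]
assert_type(rng.choice(names), Any)
assert_type(rng.choice(names, 3), npt.NDArray[Any])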
-assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) +str_list: list[str] +assert_type(def_gen.choice(str_list), Any) +assert_type(def_gen.choice(str_list, 3), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, p=[1 / 4] * 4), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, replace=True), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) assert_type(def_gen.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) assert_type(def_gen.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) @@ -929,13 +738,13 @@ assert_type(def_gen.permutation(10), npt.NDArray[np.int64]) assert_type(def_gen.permutation([1, 2, 3, 4]), npt.NDArray[Any]) assert_type(def_gen.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any]) assert_type(def_gen.permutation(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D), npt.NDArray[np.float64]) assert_type(def_gen.permuted(D_2D_like), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[np.float64]) assert_type(def_gen.shuffle(np.arange(10)), None) assert_type(def_gen.shuffle([1, 2, 3, 4, 5]), None) diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index aacf217e4207..da66ab003078 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -1,10 +1,10 @@ import io -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_RecArray: TypeAlias = np.recarray[tuple[Any, ...], np.dtype[np.record]] +type _RecArray = np.recarray[tuple[Any, ...], np.dtype[np.record]] AR_i8: npt.NDArray[np.int64] REC_AR_V: _RecArray diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index d7b277735c7c..c56c8e88092c 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -1,9 +1,7 @@ -from typing import Any, Literal, TypeAlias, assert_type +from typing import Any, Literal, assert_type import numpy as np -_1: TypeAlias = Literal[1] - b: np.bool u8: np.uint64 i8: np.int64 @@ -42,8 +40,96 @@ assert_type(c8.dtype, np.dtype[np.complex64]) assert_type(c8.real, np.float32) assert_type(c16.imag, np.float64) -assert_type(np.str_('foo'), np.str_) - 
+assert_type(np.str_("foo"), np.str_) + +# Indexing +assert_type(b[()], np.bool) +assert_type(i8[()], np.int64) +assert_type(u8[()], np.uint64) +assert_type(f8[()], np.float64) +assert_type(c8[()], np.complex64) +assert_type(c16[()], np.complex128) +assert_type(U[()], np.str_) +assert_type(S[()], np.bytes_) +assert_type(V[()], np.void) + +assert_type(b[...], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(b[(...,)], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(i8[...], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(i8[(...,)], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(u8[...], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(u8[(...,)], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(f8[...], np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(f8[(...,)], np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(c8[...], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c8[(...,)], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c16[...], np.ndarray[tuple[()], np.dtype[np.complex128]]) +assert_type(c16[(...,)], np.ndarray[tuple[()], np.dtype[np.complex128]]) +assert_type(U[...], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(U[(...,)], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(S[...], np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(S[(...,)], np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(V[...], np.ndarray[tuple[()], np.dtype[np.void]]) +assert_type(V[(...,)], np.ndarray[tuple[()], np.dtype[np.void]]) + +None1 = (None,) +None2 = (None, None) +None3 = (None, None, None) +None4 = (None, None, None, None) + +assert_type(b[None], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None1], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None2], np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(b[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) +assert_type(b[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bool]]) + +assert_type(u8[None], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None1], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None2], np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(u8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.uint64]]) +assert_type(u8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.uint64]]) + +assert_type(i8[None], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None1], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None2], np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(i8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(i8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.int64]]) + +assert_type(f8[None], np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8[None1], np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8[None2], np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(f8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(f8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.float64]]) + +assert_type(c8[None], np.ndarray[tuple[int], np.dtype[np.complex64]]) +assert_type(c8[None1], np.ndarray[tuple[int], np.dtype[np.complex64]]) +assert_type(c8[None2], np.ndarray[tuple[int, int], np.dtype[np.complex64]]) +assert_type(c8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.complex64]]) +assert_type(c8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex64]]) + +assert_type(c16[None], np.ndarray[tuple[int], 
np.dtype[np.complex128]]) +assert_type(c16[None1], np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(c16[None2], np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(c16[None3], np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(c16[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]) + +assert_type(U[None], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None1], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None2], np.ndarray[tuple[int, int], np.dtype[np.str_]]) +assert_type(U[None3], np.ndarray[tuple[int, int, int], np.dtype[np.str_]]) +assert_type(U[None4], np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(S[None], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None1], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None2], np.ndarray[tuple[int, int], np.dtype[np.bytes_]]) +assert_type(S[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bytes_]]) +assert_type(S[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(V[None], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None1], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None2], np.ndarray[tuple[int, int], np.dtype[np.void]]) +assert_type(V[None3], np.ndarray[tuple[int, int, int], np.dtype[np.void]]) +assert_type(V[None4], np.ndarray[tuple[Any, ...], np.dtype[np.void]]) assert_type(V[0], Any) assert_type(V["field1"], Any) assert_type(V[["field1", "field2"]], np.void) @@ -110,17 +196,17 @@ assert_type(S.flatten(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) assert_type(b.reshape(()), np.bool) assert_type(i8.reshape([]), np.int64) -assert_type(b.reshape(1), np.ndarray[tuple[_1], np.dtype[np.bool]]) -assert_type(i8.reshape(-1), np.ndarray[tuple[_1], np.dtype[np.int64]]) -assert_type(u8.reshape(1, 1), np.ndarray[tuple[_1, _1], np.dtype[np.uint64]]) -assert_type(f8.reshape(1, -1), np.ndarray[tuple[_1, _1], np.dtype[np.float64]]) -assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[_1, _1, _1], np.dtype[np.complex128]]) -assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[_1, _1, _1, _1], np.dtype[np.str_]]) +assert_type(b.reshape(1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.reshape(1, 1), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(f8.reshape(1, -1), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[int, int, int, int], np.dtype[np.str_]]) assert_type( S.reshape(1, 1, 1, 1, 1), np.ndarray[ # len(shape) >= 5 - tuple[_1, _1, _1, _1, _1, *tuple[_1, ...]], + tuple[int, int, int, int, int, *tuple[int, ...]], np.dtype[np.bytes_], ], ) diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 746e804ce577..166481d80922 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy._typing as np_t import numpy.typing as npt -AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: 
npt.NDArray[np.bytes_] @@ -190,3 +190,7 @@ assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) +assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index d70bc971c15f..583ca60f90a7 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -6,7 +6,7 @@ import unittest import warnings from collections.abc import Callable from pathlib import Path -from typing import Any, TypeVar, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt @@ -15,8 +15,7 @@ AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] bool_obj: bool -suppress_obj: np.testing.suppress_warnings -FT = TypeVar("FT", bound=Callable[..., Any]) +suppress_obj: np.testing.suppress_warnings # type: ignore[deprecated] # pyright: ignore[reportDeprecated] def func() -> int: ... @@ -58,12 +57,12 @@ with np.testing.clear_and_catch_warnings(True) as c1: with np.testing.clear_and_catch_warnings() as c2: assert_type(c2, None) -assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) -assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) +assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(suppress_obj.filter(RuntimeWarning), None) assert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage]) with suppress_obj as c3: - assert_type(c3, np.testing.suppress_warnings) + assert_type(c3, np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(np.testing.verbose, int) assert_type(np.testing.IS_PYPY, bool) @@ -90,7 +89,7 @@ assert_type(np.testing.assert_equal({1}, {1}), None) assert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) assert_type(np.testing.assert_equal(1, 1.0, verbose=True), None) -assert_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]), None) +assert_type(np.testing.print_assert_equal("Test XYZ of func xyz", [0, 1], [0, 1]), None) assert_type(np.testing.assert_almost_equal(1.0, 1.1), None) assert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) @@ -148,7 +147,7 @@ assert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), class Test: ... 
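# --- editor's note: hypothetical sketch, not part of the patch ---
# The paired comments above (`# type: ignore[deprecated]` plus
# `# pyright: ignore[reportDeprecated]`) are needed because mypy and
# pyright use different codes for the diagnostic raised on use of a
# PEP 702 `@deprecated` symbol (opt-in for mypy via
# --enable-error-code=deprecated; `deprecated` is importable from
# typing_extensions, or from warnings on Python 3.13+):

from typing_extensions import deprecated

@deprecated("demo only; use a replacement API instead")
def _old_helper() -> None: ...

_old_helper()  # type: ignore[deprecated]  # pyright: ignore[reportDeprecated]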
-def decorate(a: FT) -> FT: +def decorate[FT: Callable[..., Any]](a: FT) -> FT: return a assert_type(np.testing.decorate_methods(Test, decorate), None) @@ -172,8 +171,8 @@ assert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), Non assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray[Any]) assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) -assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) -assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) +assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] def func4(a: int, b: str) -> bool: ... diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 7e9563a38611..8cafe729a943 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,145 +1,223 @@ -from typing import Any, TypeVar, assert_type +from typing import Any, assert_type, type_check_only import numpy as np import numpy.typing as npt -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +type _1D = tuple[int] +type _2D = tuple[int, int] +type _ND = tuple[Any, ...] -def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... +type _Indices2D = tuple[ + np.ndarray[_1D, np.dtype[np.intp]], + np.ndarray[_1D, np.dtype[np.intp]], +] -def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... - -AR_b: npt.NDArray[np.bool] -AR_u: npt.NDArray[np.uint64] -AR_i: npt.NDArray[np.int64] -AR_f: npt.NDArray[np.float64] -AR_c: npt.NDArray[np.complex128] -AR_O: npt.NDArray[np.object_] - -AR_LIKE_b: list[bool] -AR_LIKE_c: list[complex] - -assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) -assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) - -assert_type(np.flipud(AR_b), npt.NDArray[np.bool]) -assert_type(np.flipud(AR_LIKE_b), npt.NDArray[Any]) - -assert_type(np.eye(10), npt.NDArray[np.float64]) -assert_type(np.eye(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.eye(10, k=2, dtype=int), npt.NDArray[Any]) +### -assert_type(np.diag(AR_b), npt.NDArray[np.bool]) -assert_type(np.diag(AR_LIKE_b, k=0), npt.NDArray[Any]) +_nd_bool: np.ndarray[_ND, np.dtype[np.bool]] +_1d_bool: np.ndarray[_1D, np.dtype[np.bool]] +_2d_bool: np.ndarray[_2D, np.dtype[np.bool]] +_nd_u64: np.ndarray[_ND, np.dtype[np.uint64]] +_nd_i64: np.ndarray[_ND, np.dtype[np.int64]] +_nd_f64: np.ndarray[_ND, np.dtype[np.float64]] +_nd_c128: np.ndarray[_ND, np.dtype[np.complex128]] +_nd_obj: np.ndarray[_ND, np.dtype[np.object_]] -assert_type(np.diagflat(AR_b), npt.NDArray[np.bool]) -assert_type(np.diagflat(AR_LIKE_b, k=0), npt.NDArray[Any]) +_to_nd_bool: list[bool] | list[list[bool]] +_to_1d_bool: list[bool] +_to_2d_bool: list[list[bool]] -assert_type(np.tri(10), npt.NDArray[np.float64]) -assert_type(np.tri(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.tri(10, k=2, dtype=int), npt.NDArray[Any]) +_to_1d_f64: list[float] +_to_1d_c128: list[complex] -assert_type(np.tril(AR_b), npt.NDArray[np.bool]) -assert_type(np.tril(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.triu(AR_b), npt.NDArray[np.bool]) -assert_type(np.triu(AR_LIKE_b, k=0), npt.NDArray[Any]) - 
-assert_type(np.vander(AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.vander(AR_u), npt.NDArray[np.signedinteger]) -assert_type(np.vander(AR_i, N=2), npt.NDArray[np.signedinteger]) -assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating]) -assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating]) -assert_type(np.vander(AR_O), npt.NDArray[np.object_]) +@type_check_only +def func1[ScalarT: np.generic](ar: npt.NDArray[ScalarT], a: int) -> npt.NDArray[ScalarT]: ... +@type_check_only +def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... +@type_check_only +class _Cube: + shape = 3, 4 + ndim = 2 + +### + +# fliplr +assert_type(np.fliplr(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.fliplr(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.fliplr(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.fliplr(_to_nd_bool), np.ndarray) +assert_type(np.fliplr(_to_1d_bool), np.ndarray) +assert_type(np.fliplr(_to_2d_bool), np.ndarray) + +# flipud +assert_type(np.flipud(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.flipud(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.flipud(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.flipud(_to_nd_bool), np.ndarray) +assert_type(np.flipud(_to_1d_bool), np.ndarray) +assert_type(np.flipud(_to_2d_bool), np.ndarray) + +# eye +assert_type(np.eye(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.eye(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.eye(10, k=2, dtype=int), np.ndarray[_2D]) + +# diag +assert_type(np.diag(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.diag(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diag(_2d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.diag(_to_nd_bool, k=0), np.ndarray) +assert_type(np.diag(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diag(_to_2d_bool, k=0), np.ndarray[_1D]) + +# diagflat +assert_type(np.diagflat(_nd_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_to_nd_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_2d_bool, k=0), np.ndarray[_2D]) + +# tri +assert_type(np.tri(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.tri(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.tri(10, k=2, dtype=int), np.ndarray[_2D]) + +# tril +assert_type(np.tril(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.tril(_to_nd_bool, k=0), np.ndarray) +assert_type(np.tril(_to_1d_bool, k=0), np.ndarray) +assert_type(np.tril(_to_2d_bool, k=0), np.ndarray) + +# triu +assert_type(np.triu(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.triu(_to_nd_bool, k=0), np.ndarray) +assert_type(np.triu(_to_1d_bool, k=0), np.ndarray) +assert_type(np.triu(_to_2d_bool, k=0), np.ndarray) + +# vander +assert_type(np.vander(_nd_bool), np.ndarray[_2D, np.dtype[np.int_]]) +assert_type(np.vander(_nd_u64), np.ndarray[_2D, np.dtype[np.uint64]]) +assert_type(np.vander(_nd_i64, N=2), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.vander(_nd_f64, increasing=True), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.vander(_nd_c128), np.ndarray[_2D, np.dtype[np.complex128]]) +assert_type(np.vander(_nd_obj), np.ndarray[_2D, np.dtype[np.object_]]) + +# 
histogram2d assert_type( - np.histogram2d(AR_LIKE_c, AR_LIKE_c), + np.histogram2d(_to_1d_f64, _to_1d_f64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128 | np.float64], - npt.NDArray[np.complex128 | np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_i, AR_b), + np.histogram2d(_to_1d_c128, _to_1d_c128), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) assert_type( - np.histogram2d(AR_f, AR_i), + np.histogram2d(_nd_i64, _nd_bool), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_i, AR_f), + np.histogram2d(_nd_f64, _nd_i64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), + np.histogram2d(_nd_i64, _nd_f64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_f, AR_c, bins=8), + np.histogram2d(_nd_f64, _nd_c128, weights=_to_1d_bool), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_f, bins=(8, 5)), + np.histogram2d(_nd_f64, _nd_c128, bins=8), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_i, bins=AR_u), + np.histogram2d(_nd_c128, _nd_f64, bins=(8, 5)), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.uint64], - npt.NDArray[np.uint64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)), + np.histogram2d(_nd_c128, _nd_i64, bins=_nd_u64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.uint64], - npt.NDArray[np.uint64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], ], ) assert_type( - np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)), + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_u64, _nd_u64)), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.bool | np.complex128], - npt.NDArray[np.bool | np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_bool, 8)), + tuple[ + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + ], +) +assert_type( + np.histogram2d(_nd_c128, 
_nd_c128, bins=(_to_1d_f64, 8)), + tuple[ + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) -assert_type(np.mask_indices(10, func1), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) -assert_type(np.mask_indices(8, func2, "0"), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) - -assert_type(np.tril_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.tril_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.triu_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.triu_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) +# mask_indices +assert_type(np.mask_indices(10, func1), _Indices2D) +assert_type(np.mask_indices(8, func2, "0"), _Indices2D) + +# tril_indices +assert_type(np.tril_indices(3), _Indices2D) +assert_type(np.tril_indices(3, 1), _Indices2D) +assert_type(np.tril_indices(3, 1, 2), _Indices2D) +# triu_indices +assert_type(np.triu_indices(3), _Indices2D) +assert_type(np.triu_indices(3, 1), _Indices2D) +assert_type(np.triu_indices(3, 1, 2), _Indices2D) + +# tril_indices_from +assert_type(np.tril_indices_from(_2d_bool), _Indices2D) +assert_type(np.tril_indices_from(_Cube()), _Indices2D) +# triu_indices_from +assert_type(np.triu_indices_from(_2d_bool), _Indices2D) +assert_type(np.triu_indices_from(_Cube()), _Indices2D) diff --git a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 748507530aa1..f205b82b4f75 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -1,23 +1,23 @@ """Typing tests for `_core._ufunc_config`.""" +from _typeshed import SupportsWrite from collections.abc import Callable from typing import Any, assert_type -from _typeshed import SupportsWrite - import numpy as np +from numpy._core._ufunc_config import _ErrDict def func(a: str, b: int) -> None: ... class Write: def write(self, value: str) -> None: ...
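For context on the `_Indices2D` assertions above: the triangle-index helpers return a pair of 1-D `intp` index arrays at runtime, which is exactly what the alias encodes. A quick check (not part of the diff):

    import numpy as np

    rows, cols = np.tril_indices(3)  # lower-triangle indices of a 3x3 array
    assert rows.dtype == np.intp and cols.dtype == np.intp
    assert rows.ndim == cols.ndim == 1
    print(rows, cols)  # [0 1 1 2 2 2] [0 0 1 0 1 2]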
-assert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(under="call"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(invalid="raise"), np._core._ufunc_config._ErrDict) -assert_type(np.geterr(), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(all=None), _ErrDict) +assert_type(np.seterr(divide="ignore"), _ErrDict) +assert_type(np.seterr(over="warn"), _ErrDict) +assert_type(np.seterr(under="call"), _ErrDict) +assert_type(np.seterr(invalid="raise"), _ErrDict) +assert_type(np.geterr(), _ErrDict) assert_type(np.setbufsize(4096), int) assert_type(np.getbufsize(), int) diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi index a0ede60e0158..c679b82d2836 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt @@ -11,12 +11,12 @@ AR_LIKE_O: list[np.object_] AR_U: npt.NDArray[np.str_] -assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) -assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) +assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) # type: ignore[deprecated] assert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool]) assert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool]) diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 93a8bfb15d06..eda92f2117c6 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -98,26 +98,45 @@ assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) -assert_type(np.absolute.outer(), NoReturn) -assert_type(np.frexp.outer(), NoReturn) -assert_type(np.divmod.outer(), NoReturn) -assert_type(np.matmul.outer(), NoReturn) +def test_absolute_outer_invalid() -> None: + assert_type(np.absolute.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_outer_invalid() -> None: + assert_type(np.frexp.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_outer_invalid() -> None: + assert_type(np.divmod.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_outer_invalid() -> None: + assert_type(np.matmul.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] -assert_type(np.absolute.reduceat(), NoReturn) -assert_type(np.frexp.reduceat(), NoReturn) -assert_type(np.divmod.reduceat(), NoReturn) -assert_type(np.matmul.reduceat(), NoReturn) +def test_absolute_reduceat_invalid() -> None: + assert_type(np.absolute.reduceat(AR_f8, AR_i8), NoReturn) # type: 
ignore[arg-type] +def test_frexp_reduceat_invalid() -> None: + assert_type(np.frexp.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_divmod_reduceat_invalid() -> None: + assert_type(np.divmod.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_matmul_reduceat_invalid() -> None: + assert_type(np.matmul.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] -assert_type(np.absolute.reduce(), NoReturn) -assert_type(np.frexp.reduce(), NoReturn) -assert_type(np.divmod.reduce(), NoReturn) -assert_type(np.matmul.reduce(), NoReturn) +def test_absolute_reduce_invalid() -> None: + assert_type(np.absolute.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_reduce_invalid() -> None: + assert_type(np.frexp.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_reduce_invalid() -> None: + assert_type(np.divmod.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_reduce_invalid() -> None: + assert_type(np.matmul.reduce(AR_f8), NoReturn) # type: ignore[arg-type] -assert_type(np.absolute.accumulate(), NoReturn) -assert_type(np.frexp.accumulate(), NoReturn) -assert_type(np.divmod.accumulate(), NoReturn) -assert_type(np.matmul.accumulate(), NoReturn) +def test_absolute_accumulate_invalid() -> None: + assert_type(np.absolute.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_accumulate_invalid() -> None: + assert_type(np.frexp.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_accumulate_invalid() -> None: + assert_type(np.divmod.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_accumulate_invalid() -> None: + assert_type(np.matmul.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] -assert_type(np.frexp.at(), NoReturn) -assert_type(np.divmod.at(), NoReturn) -assert_type(np.matmul.at(), NoReturn) +def test_frexp_at_invalid() -> None: + assert_type(np.frexp.at(AR_f8, i8), NoReturn) # type: ignore[arg-type] +def test_divmod_at_invalid() -> None: + assert_type(np.divmod.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_at_invalid() -> None: + assert_type(np.matmul.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index f72122f208c9..250686a98ee8 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -1,7 +1,8 @@ import os -import sys from pathlib import Path +import pytest + import numpy as np from numpy.testing import assert_ @@ -21,10 +22,12 @@ ROOT / "random" / "__init__.pyi", ROOT / "testing" / "__init__.pyi", ] -if sys.version_info < (3, 12): - FILES += [ROOT / "distutils" / "__init__.pyi"] +@pytest.mark.thread_unsafe( + reason="os.path has a thread-safety bug (python/cpython#140054). " + "Expected to only be a problem in 3.14.0" +) class TestIsFile: def test_isfile(self): """Test if all ``.pyi`` files are properly installed.""" diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index 236952101126..9db74c8ddc28 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -3,7 +3,8 @@ from typing import ( Any, NamedTuple, - Union, # pyright: ignore[reportDeprecated] + Self, + TypeAliasType, get_args, get_origin, get_type_hints, @@ -17,18 +18,23 @@ class TypeTup(NamedTuple): - typ: type - args: tuple[type, ...] - origin: type | None + typ: type # type expression + args: tuple[type, ...] # generic type parameters or arguments + origin: type | None # e.g. 
`UnionType` or `GenericAlias` + @classmethod + def from_type_alias(cls, alias: TypeAliasType, /) -> Self: + # PEP 695 `type _ = ...` aliases wrap the type expression as a + # `types.TypeAliasType` instance with a `__value__` attribute. + tp = alias.__value__ + return cls(typ=tp, args=get_args(tp), origin=get_origin(tp)) -NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) TYPES = { - "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union), - "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union), - "NBitBase": TypeTup(npt.NBitBase, (), None), - "NDArray": NDArrayTup, + "ArrayLike": TypeTup.from_type_alias(npt.ArrayLike), + "DTypeLike": TypeTup.from_type_alias(npt.DTypeLike), + "NBitBase": TypeTup(npt.NBitBase, (), None), # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + "NDArray": TypeTup.from_type_alias(npt.NDArray), } @@ -68,7 +74,7 @@ def test_get_type_hints_str(name: type, tup: TypeTup) -> None: def func(a: typ_str) -> None: pass out = get_type_hints(func) - ref = {"a": typ, "return": type(None)} + ref = {"a": getattr(npt, str(name)), "return": type(None)} assert out == ref @@ -80,7 +86,6 @@ def test_keys() -> None: PROTOCOLS: dict[str, tuple[type[Any], object]] = { - "_SupportsDType": (_npt._SupportsDType, np.int64(1)), "_SupportsArray": (_npt._SupportsArray, np.arange(10)), "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)), "_NestedSequence": (_npt._NestedSequence, [1]), @@ -94,9 +99,5 @@ def test_isinstance(self, cls: type[Any], obj: object) -> None: assert not isinstance(None, cls) def test_issubclass(self, cls: type[Any], obj: object) -> None: - if cls is _npt._SupportsDType: - pytest.xfail( - "Protocols with non-method members don't support issubclass()" - ) assert issubclass(type(obj), cls) assert not issubclass(type(None), cls) diff --git a/numpy/version.pyi b/numpy/version.pyi index 113cde3f5621..073885c017c2 100644 --- a/numpy/version.pyi +++ b/numpy/version.pyi @@ -1,18 +1,9 @@ from typing import Final, LiteralString -__all__ = ( - '__version__', - 'full_version', - 'git_revision', - 'release', - 'short_version', - 'version', -) +version: Final[LiteralString] = ... +__version__: Final[LiteralString] = ... +full_version: Final[LiteralString] = ... -version: Final[LiteralString] -__version__: Final[LiteralString] -full_version: Final[LiteralString] - -git_revision: Final[LiteralString] -release: Final[bool] -short_version: Final[LiteralString] +git_revision: Final[LiteralString] = ... +release: Final[bool] = ... +short_version: Final[LiteralString] = ... diff --git a/pavement.py b/pavement.py deleted file mode 100644 index e00b9647f5e3..000000000000 --- a/pavement.py +++ /dev/null @@ -1,184 +0,0 @@ -r""" -This paver file is intended to help with the release process as much as -possible. It relies on virtualenv to generate 'bootstrap' environments as -independent from the user system as possible (e.g. to make sure the sphinx doc -is built against the built numpy, not an installed one). - -Building changelog + notes -========================== - -Assumes you have git and the binaries/tarballs in installers/:: - - paver write_release - paver write_note - -This automatically put the checksum into README.rst, and writes the Changelog. 
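For reference, a small demo of the `TypeAliasType` unwrapping that `from_type_alias` above relies on (Python 3.12+; the alias name is illustrative):

    from typing import TypeAliasType, get_args, get_origin

    type MaybeInt = int | None  # a PEP 695 alias is a TypeAliasType instance

    assert isinstance(MaybeInt, TypeAliasType)
    value = MaybeInt.__value__  # the wrapped type expression, here int | None
    print(get_origin(value), get_args(value))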
- -TODO -==== - - the script is messy, lots of global variables - - make it more easily customizable (through command line args) - - missing targets: install & test, sdist test, debian packaging - - fix bdist_mpkg: we build the same source twice -> how to make sure we use - the same underlying python for egg install in venv and for bdist_mpkg -""" -import hashlib -import os -import textwrap - -# The paver package needs to be installed to run tasks -import paver -from paver.easy import Bunch, options, sh, task - -#----------------------------------- -# Things to be changed for a release -#----------------------------------- - -# Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.3.0-notes.rst' - - -#------------------------------------------------------- -# Hardcoded build/install dirs, virtualenv options, etc. -#------------------------------------------------------- - -# Where to put the release installers -options(installers=Bunch(releasedir="release", - installersdir=os.path.join("release", "installers")),) - - -#------------- -# README stuff -#------------- - -def _compute_hash(idirs, hashfunc): - """Hash files using given hashfunc. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - hashfunc : hash function - Function to be used to hash the files. - - """ - released = paver.path.path(idirs).listdir() - checksums = [] - for fpath in sorted(released): - with open(fpath, 'rb') as fin: - fhash = hashfunc(fin.read()) - checksums.append( - f'{fhash.hexdigest()} {os.path.basename(fpath)}') - return checksums - - -def compute_md5(idirs): - """Compute md5 hash of files in idirs. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - return _compute_hash(idirs, hashlib.md5) - - -def compute_sha256(idirs): - """Compute sha256 hash of files in idirs. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - # better checksum so gpg signed README.rst containing the sums can be used - # to verify the binaries instead of signing all binaries - return _compute_hash(idirs, hashlib.sha256) - - -def write_release_task(options, filename='README'): - """Append hashes of release files to release notes. - - This appends file hashes to the release notes and creates - four README files of the result in various formats: - - - README.rst - - README.rst.gpg - - README.md - - README.md.gpg - - The md file are created using `pandoc` so that the links are - properly updated. The gpg files are kept separate, so that - the unsigned files may be edited before signing if needed. - - Parameters - ---------- - options : - Set by ``task`` decorator. - filename : str - Filename of the modified notes. The file is written - in the release directory. 
- - """ - idirs = options.installers.installersdir - notes = paver.path.path(RELEASE_NOTES) - rst_readme = paver.path.path(filename + '.rst') - md_readme = paver.path.path(filename + '.md') - - # append hashes - with open(rst_readme, 'w') as freadme: - with open(notes) as fnotes: - freadme.write(fnotes.read()) - - freadme.writelines(textwrap.dedent( - """ - Checksums - ========= - - MD5 - --- - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_md5(idirs)]) - - freadme.writelines(textwrap.dedent( - """ - SHA256 - ------ - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_sha256(idirs)]) - - # generate md file using pandoc before signing - sh(f"pandoc -s -o {md_readme} {rst_readme}") - - # Sign files - if hasattr(options, 'gpg_key'): - cmd = f'gpg --clearsign --armor --default_key {options.gpg_key}' - else: - cmd = 'gpg --clearsign --armor' - - sh(cmd + f' --output {rst_readme}.gpg {rst_readme}') - sh(cmd + f' --output {md_readme}.gpg {md_readme}') - - -@task -def write_release(options): - """Write the README files. - - Two README files are generated from the release notes, one in ``rst`` - markup for the general release, the other in ``md`` markup for the github - release notes. - - Parameters - ---------- - options : - Set by ``task`` decorator. - - """ - rdir = options.installers.releasedir - write_release_task(options, os.path.join(rdir, 'README')) diff --git a/pyproject.toml b/pyproject.toml index b62d71cbba73..084ba993072e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,34 +1,30 @@ [build-system] build-backend = "mesonpy" requires = [ - "meson-python>=0.15.0", + "meson-python>=0.18.0", "Cython>=3.0.6", # keep in sync with version check in meson.build ] [project] name = "numpy" -version = "2.3.0.dev0" -# TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) -license = {file = "LICENSE.txt"} - +version = "2.5.0.dev0" description = "Fundamental package for array computing in Python" authors = [{name = "Travis E. Oliphant et al."}] maintainers = [ {name = "NumPy Developers", email="numpy-discussion@python.org"}, ] -requires-python = ">=3.11" +requires-python = ">=3.12" readme = "README.md" classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', - 'License :: OSI Approved :: BSD License', 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', + 'Programming Language :: Python :: 3.14', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: Implementation :: CPython', 'Topic :: Software Development', @@ -39,6 +35,52 @@ classifiers = [ 'Operating System :: Unix', 'Operating System :: MacOS', ] +# License info: +# - The main NumPy project license is BSD-3-Clause. +# - The SPDX license expression below reflects installed numpy packages when +# built from source (e.g., with `python -m build --wheel`), with no vendoring. 
+# - That SPDX expression is therefore incomplete for: +# (a) sdists - see the comment below `license-files` for other licenses +# included in the sdist +# (b) wheels on PyPI - most wheels include vendored libraries with additional licenses: +# - libopenblas : BSD-3-Clause AND BSD-3-Clause-Attribution (all except arm64 macOS>=14) +# - libgfortran : GPL-3.0-with-GCC-exception (all except arm64 macOS>=14) +# - libquadmath : LGPL-2.1-or-later (all except arm64 macOS>=14 and Windows) +# The licenses for these vendored components are dynamically included +# in the build process for PyPI wheels. +license = 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0' +license-files = [ + 'LICENSE.txt', # BSD-3-Clause + 'numpy/_core/include/numpy/libdivide/LICENSE.txt', # Zlib + 'numpy/_core/src/common/pythoncapi-compat/COPYING', # 0BSD + 'numpy/_core/src/highway/LICENSE', # Dual-licensed: Apache 2.0 or BSD 3-Clause + 'numpy/_core/src/multiarray/dragon4_LICENSE.txt', # MIT + 'numpy/_core/src/npysort/x86-simd-sort/LICENSE.md', # BSD-3-Clause + 'numpy/_core/src/umath/svml/LICENSE', # BSD-3-Clause + 'numpy/fft/pocketfft/LICENSE.md', # BSD-3-Clause + 'numpy/ma/LICENSE', # BSD-3-Clause + 'numpy/linalg/lapack_lite/LICENSE.txt', # BSD-3-Clause + 'numpy/random/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/distributions/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/mt19937/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/pcg64/LICENSE.md', # MIT + 'numpy/random/src/philox/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/sfc64/LICENSE.md', # MIT + 'numpy/random/src/splitmix64/LICENSE.md', # CC0-1.0 +] +# The license files below apply only to files in the repo and sdist, not to +# installed `numpy` packages or wheels (build/doc tools don't affect the +# license of the installed package). We have to make a choice whether to add +# those to the SPDX expression above since PEP 639 is unclear on the +# differences; we choose to make the SPDX expression reflect *a wheel built +# from the sources*. +# '.spin/LICENSE', # BSD-3-Clause +# 'doc/source/_static/scipy-mathjax/LICENSE', # Apache-2.0 +# 'numpy/_build_utils/tempita/LICENSE.txt', # BSD-3-Clause +# 'vendored-meson/meson/COPYING', # Apache-2.0 +# Note that the commented out license files are still included in the sdist, +# just not in Core Metadata and in the .dist-info directory. + [project.scripts] f2py = 'numpy.f2py.f2py2e:main' @@ -139,73 +181,60 @@ tracker = "https://github.com/numpy/numpy/issues" [tool.cibuildwheel] # Note: the below skip command doesn't do much currently, the platforms to -# build wheels for in CI are controlled in `.github/workflows/wheels.yml` and -# `tools/ci/cirrus_wheels.yml`. -build-frontend = "build" -skip = "*_i686 *_ppc64le *_s390x *_universal2" +# build wheels for in CI are controlled in `.github/workflows/wheels.yml`. 
+# universal2 wheels are not supported (see gh-21233), use `delocate-fuse` if you need them +skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" -# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) -config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) +[tool.cibuildwheel.config-settings] +setup-args = ["-Duse-ilp64=true", "-Dallow-noblas=false"] +build-dir = "build" + [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" manylinux-aarch64-image = "manylinux_2_28" musllinux-x86_64-image = "musllinux_1_2" musllinux-aarch64-image = "musllinux_1_2" -[tool.cibuildwheel.pyodide] -config-settings = "build-dir=build setup-args=--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross setup-args=-Dblas=none setup-args=-Dlapack=none" - [tool.cibuildwheel.linux.environment] -# RUNNER_OS is a GitHub Actions specific env var; define it here so it works on Cirrus CI too +# RUNNER_OS is a GitHub Actions specific env var; define it here so it's +# defined when running cibuildwheel locally RUNNER_OS="Linux" # /project will be the $PWD equivalent inside the docker used to build the wheel PKG_CONFIG_PATH="/project/.openblas" -LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/project/.openblas/lib" - -[tool.cibuildwheel.macos] -# universal2 wheels are not supported (see gh-21233), use `delocate-fuse` if you need them -# note that universal2 wheels are not built, they're listed in the tool.cibuildwheel.skip -# section -# Not clear why the DYLD_LIBRARY_PATH is not passed through from the environment -repair-wheel-command = [ - "export DYLD_LIBRARY_PATH=$PWD/.openblas/lib", - "echo DYLD_LIBRARY_PATH $DYLD_LIBRARY_PATH", - "delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel}", -] [tool.cibuildwheel.windows] -# This does not work, use CIBW_ENVIRONMENT_WINDOWS -environment = {PKG_CONFIG_PATH="./.openblas"} -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=false build-dir=build" +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=false"], build-dir="build"} repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" [[tool.cibuildwheel.overrides]] -select = "*-win32" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" +select = ["*-win32"] +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=true"], build-dir="build"} repair-wheel-command = "" -[[tool.cibuildwheel.overrides]] -select = "*-win_arm64" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" -repair-wheel-command = "" - -[[tool.cibuildwheel.overrides]] -select = "*pyodide*" +[tool.cibuildwheel.pyodide] before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" # Pyodide ensures that the wheels are already repaired by auditwheel-emscripten repair-wheel-command = "" test-command = "python -m pytest --pyargs numpy -m 'not slow'" +[tool.cibuildwheel.pyodide.config-settings] +build-dir = "build" +setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] + + + 
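One way to sanity-check the table-valued `config-settings` introduced above (a sketch assuming Python 3.11+ for `tomllib`, run from the repo root):

    import tomllib

    with open("pyproject.toml", "rb") as f:
        cfg = tomllib.load(f)

    # config-settings is now a TOML table of lists rather than one long string
    cs = cfg["tool"]["cibuildwheel"]["config-settings"]
    print(cs["setup-args"])  # ['-Duse-ilp64=true', '-Dallow-noblas=false']
    print(cs["build-dir"])   # 'build'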
[tool.meson-python] meson = 'vendored-meson/meson/meson.py' [tool.meson-python.args] install = ['--tags=runtime,python-runtime,tests,devel'] + [tool.spin] package = 'numpy' @@ -217,6 +246,7 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:build", ".spin/cmds.py:test", ".spin/cmds.py:mypy", + ".spin/cmds.py:stubtest", ".spin/cmds.py:config_openblas", ".spin/cmds.py:lint", ] diff --git a/pytest.ini b/pytest.ini index 132af0bb78ab..532095ab9aa7 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -l +addopts = -l -ra --strict-markers --strict-config norecursedirs = doc tools numpy/linalg/lapack_lite numpy/_core/code_generators numpy/_core/src/common/pythoncapi-compat doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES junit_family=xunit2 @@ -22,11 +22,8 @@ filterwarnings = ignore:The numpy.array_api submodule is still experimental. See NEP 47. # ignore matplotlib headless warning for pyplot ignore:Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.:UserWarning -# Ignore DeprecationWarnings from distutils - ignore::DeprecationWarning:.*distutils - ignore:\n\n `numpy.distutils`:DeprecationWarning # Ignore DeprecationWarning from typing.mypy_plugin ignore:`numpy.typing.mypy_plugin` is deprecated:DeprecationWarning # Ignore DeprecationWarning from struct module # see https://github.com/numpy/numpy/issues/28926 - ignore:Due to \'_pack_\', the \ No newline at end of file + ignore:Due to \'_pack_\', the diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index a51143a780e7..1f6eb1435cfc 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 Cython>=3.0.6 ninja -spin==0.13 +spin==0.15 build diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 5a7be719214a..824934787e10 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ -spin==0.13 +spin==0.15 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.29.0.0 +scipy-openblas32==0.3.30.0.7 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index adf7d86558f0..37e685fef0cc 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ -spin==0.13 +spin==0.15 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.29.0.0 -scipy-openblas64==0.3.29.0.0 +scipy-openblas32==0.3.30.0.7 +scipy-openblas64==0.3.30.0.7 diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 330f0f7ac8b9..b8f5cb2bd8fd 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -5,7 +5,7 @@ pydata-sphinx-theme>=0.15.2 sphinx-copybutton sphinx-design scipy -matplotlib +matplotlib!=3.10.6 pandas breathe>4.33.0 ipython!=8.1.0 @@ -17,9 +17,7 @@ pickleshare towncrier toml - -# for doctests, also needs pytz which is in test_requirements -scipy-doctest==1.6.0 +scipy-doctest>=1.8.0 # interactive documentation utilities # see https://github.com/jupyterlite/pyodide-kernel#compatibility diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 18cfb219034d..019a69da687a 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ hypothesis==6.81.1 pytest==7.4.0 -pytz==2023.3.post1 +tzdata pytest-xdist diff --git 
a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 0716b235ec9c..73eafbaf52a1 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,3 +1,4 @@ # keep in sync with `environment.yml` -ruff==0.11.9 +cython-lint +ruff==0.14.7 GitPython>=3.1.30 diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index d23e69fa1fa8..eaa092560d2d 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -12,8 +12,5 @@ gitpython>=3.1.30 # uploading wheels twine -# building and notes -Paver - # uploading release documentation packaging diff --git a/requirements/setuptools_requirement.txt b/requirements/setuptools_requirement.txt deleted file mode 100644 index 21f900d46078..000000000000 --- a/requirements/setuptools_requirement.txt +++ /dev/null @@ -1,2 +0,0 @@ -setuptools==65.5.1 ; python_version < '3.12' -setuptools ; python_version >= '3.12' diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index a2a68f044a50..e3b17f0fc856 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,19 +1,13 @@ Cython -wheel==0.38.1 -setuptools==65.5.1 ; python_version < '3.12' -setuptools ; python_version >= '3.12' -hypothesis==6.104.1 +hypothesis==6.142.2 pytest==7.4.0 -pytz==2023.3.post1 pytest-cov==4.1.0 meson ninja; sys_platform != "emscripten" pytest-xdist pytest-timeout -# For testing types. Notes on the restrictions: -# - Mypy relies on C API features not present in PyPy -# NOTE: Keep mypy in sync with environment.yml -mypy==1.15.0; platform_python_implementation != "PyPy" -typing_extensions>=4.5.0 +# For testing types +mypy==1.19.1 # for optional f2py encoding detection charset-normalizer +tzdata diff --git a/ruff.toml b/ruff.toml index 6b05d8de69ee..ebbb29283622 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,8 +1,6 @@ extend-exclude = [ "numpy/__config__.py", - "numpy/distutils", "numpy/typing/_char_codes.py", - "numpy/typing/tests/data", "spin/cmds.py", # Submodules. 
"doc/source/_static/scipy-mathjax", @@ -16,70 +14,95 @@ extend-exclude = [ line-length = 88 +[format] +line-ending = "lf" + [lint] preview = true extend-select = [ - "B", - "C4", - "ISC", - "LOG", - "G", - "PIE", - "TID", - "FLY", - "I", - "PD", - "E", - "W", - "PGH", - "PLE", - "UP", + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "ISC", # flake8-implicit-str-concat + "LOG", # flake8-logging + "G", # flake8-logging-format + "PIE", # flake8-pie + "Q", # flake8-quotes + "TID", # flake8-tidy-imports + "FLY", # flynt + "I", # isort + "PD", # pandas-vet + "PERF", # perflint + "E", # pycodestyle/error + "W", # pycodestyle/warning + "PGH", # pygrep-hooks + "PLE", # pylint/error + "UP", # pyupgrade ] ignore = [ - "B006", # Do not use mutable data structures for argument defaults - "B007", # Loop control variable not used within loop body - "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` - "B023", # Function definition does not bind loop variable - "B028", # No explicit `stacklevel` keyword argument found - "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling - "B905", #`zip()` without an explicit `strict=` parameter - "C408", # Unnecessary `dict()` call (rewrite as a literal) - "ISC002", # Implicitly concatenated string literals over multiple lines - "PIE790", # Unnecessary `pass` statement - "PD901", # Avoid using the generic variable name `df` for DataFrames - "E241", # Multiple spaces after comma - "E265", # Block comment should start with `# ` - "E266", # Too many leading `#` before block comment - "E302", # TODO: Expected 2 blank lines, found 1 - "E402", # Module level import not at top of file - "E712", # Avoid equality comparisons to `True` or `False` - "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check - "E731", # Do not assign a `lambda` expression, use a `def` - "E741", # Ambiguous variable name - "F403", # `from ... 
import *` used; unable to detect undefined names - "F405", # may be undefined, or defined from star imports - "F821", # Undefined name - "F841", # Local variable is assigned to but never used - "UP015", # Unnecessary mode argument - "UP031", # TODO: Use format specifiers instead of percent format + # flake8-bugbear + "B006", # Do not use mutable data structures for argument defaults + "B007", # Loop control variable not used within loop body + "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` + "B023", # Function definition does not bind loop variable + "B028", # No explicit `stacklevel` keyword argument found + "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling + "B905", # `zip()` without an explicit `strict=` parameter + # flake8-comprehensions + "C408", # Unnecessary `dict()` call (rewrite as a literal) + # flake8-implicit-str-concat + "ISC002", # Implicitly concatenated string literals over multiple lines + # flake8-pie + "PIE790", # Unnecessary `pass` statement + # perflint + "PERF401", # Use a list comprehension to create a transformed list + # pycodestyle/error + "E241", # Multiple spaces after comma + "E265", # Block comment should start with `# ` + "E266", # Too many leading `#` before block comment + "E302", # TODO: Expected 2 blank lines, found 1 + "E402", # Module level import not at top of file + "E712", # Avoid equality comparisons to `True` or `False` + "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check + "E731", # Do not assign a `lambda` expression, use a `def` + "E741", # Ambiguous variable name + # pyflakes + "F403", # `from ... import *` used; unable to detect undefined names + "F405", # may be undefined, or defined from star imports + "F821", # Undefined name + "F841", # Local variable is assigned to but never used + # pyupgrade + "UP015", # Unnecessary mode argument + "UP031", # TODO: Use format specifiers instead of percent format ] [lint.per-file-ignores] "_tempita.py" = ["B909"] "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] -"benchmarks/*py" = ["E501"] -"numpy/_core/tests/**" = ["E501"] + +"numpy/_core/tests/test_cpu_features.py" = ["E501"] +"numpy/_core/tests/test_defchararray.py" = ["E501"] +"numpy/_core/tests/test_einsum.py" = ["E501"] +"numpy/_core/tests/test_multiarray.py" = ["E501"] +"numpy/_core/tests/test_nditer*py" = ["E501"] +"numpy/_core/tests/test_umath.py" = ["E501"] +"numpy/_core/tests/test_numerictypes.py" = ["E501"] +"numpy/_core/tests/test_regression.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/_typing/*py" = ["E501"] -"numpy/lib/tests/*py" = ["E501"] -"numpy/linalg/tests/*py" = ["E501"] -"numpy/ma/tests/*py" = ["E501"] -"numpy/tests/*py" = ["E501"] -"numpy*pyi" = ["E501"] +"numpy/lib/tests/test_format.py" = ["E501"] +"numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/f2py/*py" = ["E501"] +# for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length +"numpy/_typing/_array_like.py" = ["E501"] +"numpy/_typing/_dtype_like.py" = ["E501"] +"numpy*pyi" = ["E501"] +# "useless assignments" aren't so useless when you're testing that they don't make type checkers scream +"numpy/typing/tests/data/*" = ["B015", "B018", "E501"] +# too disruptive to enable all at once +"**/*.py" = ["Q"] + "__init__.py" = 
["F401", "F403", "F405"] "__init__.pyi" = ["F401"] "numpy/_core/defchararray.py" = ["F403", "F405"] @@ -92,3 +115,17 @@ ignore = [ "numpy/ma/core.pyi" = ["F403", "F405"] "numpy/matlib.py" = ["F405"] "numpy/matlib.pyi" = ["F811"] + +[lint.flake8-builtins] +builtins-allowed-modules = ["random", "typing"] + +[lint.flake8-import-conventions.extend-aliases] +"numpy" = "np" +"numpy.typing" = "npt" + +[lint.isort] +# these are treated as stdlib within .pyi stubs +extra-standard-library = ["_typeshed", "typing_extensions"] +known-first-party = ["numpy"] +combine-as-imports = true +split-on-trailing-comma = false diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py index 61bc49197d79..fd66b68a43fc 100644 --- a/tools/check_installed_files.py +++ b/tools/check_installed_files.py @@ -84,11 +84,6 @@ def get_files(dir_to_check, kind='test'): relpath = os.path.relpath(path, dir_to_check) files[relpath] = path - if sys.version_info >= (3, 12): - files = { - k: v for k, v in files.items() if not k.startswith('distutils') - } - # ignore python files in vendored pythoncapi-compat submodule files = { k: v for k, v in files.items() if 'pythoncapi-compat' not in k diff --git a/tools/check_openblas_version.py b/tools/check_openblas_version.py index 9aa0b265dea5..085308b71171 100644 --- a/tools/check_openblas_version.py +++ b/tools/check_openblas_version.py @@ -1,20 +1,107 @@ """ -usage: check_openblas_version.py +Checks related to the OpenBLAS version used in CI. -Check the blas version is blas from scipy-openblas and is higher than -min_version -example: check_openblas_version.py 0.3.26 +Options: +1. Check that the BLAS used at build time is (a) scipy-openblas, and (b) its version is + higher than a given minimum version. Note: this method only seems to give + the first 3 version components, so 0.3.30.0.7 gets translated to 0.3.30 when reading + it back out from `scipy.show_config()`. +2. Check requirements files in the main numpy repo and compare with the numpy-release + repo. Goal is to ensure that `numpy-release` is not behind. + +Both of these checks are primarily useful in a CI job. 
+ +Examples: + + # Requires an installed numpy + $ python check_openblas_version.py --min-version 0.3.30 + + # Only needs the requirements files + $ python check_openblas_version.py --req-files \ + ../numpy-release/requirements/openblas_requirements.txt """ +import argparse +import os.path import pprint -import sys -import numpy version = sys.argv[1] -deps = numpy.show_config('dicts')['Build Dependencies'] -assert "blas" in deps -print("Build Dependencies: blas") -pprint.pprint(deps["blas"]) -assert deps["blas"]["version"].split(".") >= version.split(".") -assert deps["blas"]["name"] == "scipy-openblas" +def check_built_version(min_version): + import numpy + deps = numpy.show_config('dicts')['Build Dependencies'] + assert "blas" in deps + print("Build Dependencies: blas") + pprint.pprint(deps["blas"]) + # compare numeric components; comparing the split() string lists is + # lexicographic, so e.g. "0.3.9" would sort above "0.3.30" + build_version = tuple(int(p) for p in deps["blas"]["version"].split(".")) + assert build_version >= tuple(int(p) for p in min_version.split(".")) + assert deps["blas"]["name"] == "scipy-openblas" + + +def check_requirements_files(reqfile): + if not os.path.exists(reqfile): + print(f"Path does not exist: {reqfile}") + + def get_version(line): + req = line.split(";")[0].split("==")[1].split(".")[:5] + return tuple(int(s) for s in req) + + def parse_reqs(reqfile): + with open(reqfile) as f: + lines = f.readlines() + + v32 = None + v64 = None + for line in lines: + if "scipy-openblas32" in line: + v32 = get_version(line) + if "scipy-openblas64" in line: + v64 = get_version(line) + if v32 is None or v64 is None: + raise AssertionError("Expected `scipy-openblas32` and " + "`scipy-openblas64` in `ci_requirements.txt`, " + f"got:\n {' '.join(lines)}") + return v32, v64 + + this_dir = os.path.abspath(os.path.dirname(__file__)) + reqfile_thisrepo = os.path.join(this_dir, '..', 'requirements', + 'ci_requirements.txt') + + v32_thisrepo, v64_thisrepo = parse_reqs(reqfile_thisrepo) + v32_rel, v64_rel = parse_reqs(reqfile) + + def compare_versions(v_rel, v_thisrepo, bits): + if not v_rel >= v_thisrepo: + raise AssertionError(f"`numpy-release` version of scipy-openblas{bits} " + f"{v_rel} is behind this repo: {v_thisrepo}") + + compare_versions(v64_rel, v64_thisrepo, "64") + compare_versions(v32_rel, v32_thisrepo, "32") + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--req-files", + type=str, + help="Path to the requirements file to compare with the one in this repo" + ) + parser.add_argument( + "--min-version", + type=str, + help="The minimum version that should have been used at build time for " + "installed `numpy` package" + ) + args = parser.parse_args() + + if args.min_version is None and args.req_files is None: + raise ValueError("One of `--req-files` or `--min-version` needs to be " + "specified") + + if args.min_version: + check_built_version(args.min_version) + + if args.req_files: + check_requirements_files(args.req_files) + + +if __name__ == '__main__': + main() diff --git a/tools/check_python_h_first.py b/tools/check_python_h_first.py new file mode 100755 index 000000000000..c0d44ad635f4 --- /dev/null +++ b/tools/check_python_h_first.py @@ -0,0 +1,254 @@ +#!/usr/bin/env python +"""Check that Python.h is included before any stdlib headers. + +May be a bit overzealous, but it should get the job done.
+""" +import argparse +import fnmatch +import os.path +import re +import subprocess +import sys + +from get_submodule_paths import get_submodule_paths + +HEADER_PATTERN = re.compile( + r'^\s*#\s*include\s*[<"]((?:\w+/)*\w+(?:\.h[hp+]{0,2})?)[>"]\s*$' +) + +PYTHON_INCLUDING_HEADERS = [ + "Python.h", + # This isn't all of Python.h, but it is the visibility macros + "pyconfig.h", + "numpy/npy_common.h", + "numpy/npy_math.h", + "numpy/arrayobject.h", + "numpy/ndarrayobject.h", + "numpy/ndarraytypes.h", + "numpy/random/distributions.h", + "npy_sort.h", + "npy_config.h", + "common.h", + "npy_cpu_features.h", + # Boost::Python + "boost/python.hpp", +] +LEAF_HEADERS = [ + "numpy/numpyconfig.h", + "numpy/npy_os.h", + "numpy/npy_cpu.h", + "numpy/utils.h", +] + +C_CPP_EXTENSIONS = (".c", ".h", ".cpp", ".hpp", ".cc", ".hh", ".cxx", ".hxx") +# check against list in diff_files + +PARSER = argparse.ArgumentParser(description=__doc__) +PARSER.add_argument( + "files", + nargs="*", + help="Lint these files or directories; use **/*.c to lint all files\n" + "Expects relative paths", +) + + +def check_python_h_included_first(name_to_check: str) -> int: + """Check that the passed file includes Python.h first if it does at all. + + Perhaps overzealous, but that should work around concerns with + recursion. + + Parameters + ---------- + name_to_check : str + The name of the file to check. + + Returns + ------- + int + The number of headers before Python.h + """ + included_python = False + included_non_python_header = [] + warned_python_construct = False + basename_to_check = os.path.basename(name_to_check) + in_comment = False + includes_headers = False + with open(name_to_check) as in_file: + for i, line in enumerate(in_file, 1): + # Very basic comment parsing + # Assumes /*...*/ comments are on their own lines + if "/*" in line: + if "*/" not in line: + in_comment = True + # else-branch could use regex to remove comment and continue + continue + if in_comment: + if "*/" in line: + in_comment = False + continue + line = line.split("//", 1)[0].strip() + match = HEADER_PATTERN.match(line) + if match: + includes_headers = True + this_header = match.group(1) + if this_header in PYTHON_INCLUDING_HEADERS: + if included_non_python_header and not included_python: + # Headers before python-including header + print( + f"Header before Python.h in file {name_to_check:s}\n" + f"Python.h on line {i:d}, other header(s) on line(s)" + f" {included_non_python_header}", + file=sys.stderr, + ) + # else: # no headers before python-including header + included_python = True + PYTHON_INCLUDING_HEADERS.append(basename_to_check) + if os.path.dirname(name_to_check).endswith("include/numpy"): + PYTHON_INCLUDING_HEADERS.append(f"numpy/{basename_to_check:s}") + # We just found out where Python.h comes in this file + break + elif this_header in LEAF_HEADERS: + # This header is just defines, so it won't include + # the system headers that cause problems + continue + elif not included_python and ( + "numpy/" in this_header + and this_header not in LEAF_HEADERS + or "python" in this_header.lower() + ): + print( + f"Python.h not included before python-including header " + f"in file {name_to_check:s}\n" + f"{this_header:s} on line {i:d}", + file=sys.stderr, + ) + included_python = True + PYTHON_INCLUDING_HEADERS.append(basename_to_check) + elif not included_python and this_header not in LEAF_HEADERS: + included_non_python_header.append(i) + elif ( + not included_python + and not warned_python_construct + and ".h" not in basename_to_check + ) and 
("py::" in line or "PYBIND11_" in line): + print( + "Python-including header not used before python constructs " + f"in file {name_to_check:s}\nConstruct on line {i:d}", + file=sys.stderr, + ) + warned_python_construct = True + if not includes_headers: + LEAF_HEADERS.append(basename_to_check) + return included_python and len(included_non_python_header) + + +def sort_order(path: str) -> tuple[int, str]: + if "include/numpy" in path: + # Want to process numpy/*.h first, to work out which of those + # include Python.h directly + priority = 0x00 + elif "h" in os.path.splitext(path)[1].lower(): + # Then other headers, which tend to include numpy/*.h + priority = 0x10 + else: + # Source files after headers, to give the best chance of + # properly checking whether they include Python.h + priority = 0x20 + if "common" in path: + priority -= 8 + path_basename = os.path.basename(path) + if path_basename.startswith("npy_"): + priority -= 4 + elif path_basename.startswith("npy"): + priority -= 3 + elif path_basename.startswith("np"): + priority -= 2 + if "config" in path_basename: + priority -= 1 + return priority, path + + +def process_files(file_list: list[str]) -> int: + n_out_of_order = 0 + submodule_paths = get_submodule_paths() + root_directory = os.path.dirname(os.path.dirname(__file__)) + for name_to_check in sorted(file_list, key=sort_order): + name_to_check = os.path.join(root_directory, name_to_check) + if any(submodule_path in name_to_check for submodule_path in submodule_paths): + continue + if ".dispatch." in name_to_check: + continue + try: + n_out_of_order += check_python_h_included_first(name_to_check) + except UnicodeDecodeError: + print(f"File {name_to_check:s} not utf-8", sys.stdout) + return n_out_of_order + + +def find_c_cpp_files(root: str) -> list[str]: + + result = [] + + for dirpath, dirnames, filenames in os.walk(root): + # I'm assuming other people have checked boost + for name in ("build", ".git", "boost"): + try: + dirnames.remove(name) + except ValueError: + pass + for name in fnmatch.filter(dirnames, "*.p"): + dirnames.remove(name) + result.extend( + [ + os.path.join(dirpath, name) + for name in filenames + if os.path.splitext(name)[1].lower() in C_CPP_EXTENSIONS + ] + ) + # Check the headers before the source files + result.sort(key=lambda path: "h" in os.path.splitext(path)[1], reverse=True) + return result + + +def diff_files(sha: str) -> list[str]: + """Find the diff since the given SHA. 
+
+
+def diff_files(sha: str) -> list[str]:
+    """Find the diff since the given SHA.
+
+    Adapted from lint.py
+    """
+    res = subprocess.run(
+        [
+            "git",
+            "diff",
+            "--name-only",
+            "--diff-filter=ACMR",
+            "-z",
+            sha,
+            "--",
+            # Check against C_CPP_EXTENSIONS
+            "*.[chCH]",
+            "*.[ch]pp",
+            "*.[ch]xx",
+            "*.cc",
+            "*.hh",
+        ],
+        stdout=subprocess.PIPE,
+        encoding="utf-8",
+    )
+    res.check_returncode()
+    return [f for f in res.stdout.split("\0") if f]
+
+
+if __name__ == "__main__":
+    args = PARSER.parse_args()
+
+    if len(args.files) == 0:
+        files = find_c_cpp_files("numpy")
+    else:
+        files = args.files
+        if len(files) == 1 and os.path.isdir(files[0]):
+            files = find_c_cpp_files(files[0])
+
+    # See which of the headers include Python.h and add them to the list
+    n_out_of_order = process_files(files)
+    sys.exit(n_out_of_order)
diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt
index 98c3895ced06..8370099015c5 100644
--- a/tools/ci/array-api-xfails.txt
+++ b/tools/ci/array-api-xfails.txt
@@ -1,5 +1,20 @@
 # finfo return type misalignment
 array_api_tests/test_data_type_functions.py::test_finfo[float32]
+array_api_tests/test_data_type_functions.py::test_finfo[complex64]
+
+# finfo: data type not inexact
+array_api_tests/test_data_type_functions.py::test_finfo[float64]
+array_api_tests/test_data_type_functions.py::test_finfo[complex128]
+
+# iinfo: Invalid integer data type 'O'
+array_api_tests/test_data_type_functions.py::test_iinfo[int8]
+array_api_tests/test_data_type_functions.py::test_iinfo[uint8]
+array_api_tests/test_data_type_functions.py::test_iinfo[int16]
+array_api_tests/test_data_type_functions.py::test_iinfo[uint16]
+array_api_tests/test_data_type_functions.py::test_iinfo[int32]
+array_api_tests/test_data_type_functions.py::test_iinfo[uint32]
+array_api_tests/test_data_type_functions.py::test_iinfo[int64]
+array_api_tests/test_data_type_functions.py::test_iinfo[uint64]

 # 'shape' arg is present. 'newshape' is retained for backward compat.
 array_api_tests/test_signatures.py::test_func_signature[reshape]
diff --git a/tools/ci/check_c_api_usage.py b/tools/ci/check_c_api_usage.py
new file mode 100644
index 000000000000..49c317a1259c
--- /dev/null
+++ b/tools/ci/check_c_api_usage.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python3
+"""
+Borrow-ref C API linter (Python version).
+
+- Recursively scans source files under --root (default: numpy)
+- Matches suspicious CPython C-API calls as whole identifiers
+- Skips:
+  - lines with '// noqa: borrowed-ref OK' or
+    '// noqa: borrowed-ref - manual fix needed'
+  - line comments (// ...)
+  - block comments (/* ... */), even when they span lines
+- Prints findings and exits 1 if any issues are found, else 0
+"""
+from __future__ import annotations
+
+import argparse
+import os
+import re
+import sys
+import tempfile
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from pathlib import Path
+from re import Pattern
+
+
+def strip_comments(line: str, in_block: bool) -> tuple[str, bool]:
+    """
+    Return (code_without_comments, updated_in_block).
+    Removes // line comments and /* ... */ block comments (non-nesting, C-style).
+    """
+    i = 0
+    out_parts: list[str] = []
+    n = len(line)
+
+    while i < n:
+        if in_block:
+            end = line.find("*/", i)
+            if end == -1:
+                # Entire remainder is inside a block comment.
+                return ("".join(out_parts), True)
+            i = end + 2
+            in_block = False
+            continue
+
+        # Not in block: look for next // or /* from current i
+        sl = line.find("//", i)
+        bl = line.find("/*", i)
+
+        if sl != -1 and (bl == -1 or sl < bl):
+            # Line comment starts first: take code up to '//' and stop
+            out_parts.append(line[i:sl])
+            return ("".join(out_parts), in_block)
+
+        if bl != -1:
+            # Block comment starts: take code up to '/*', then enter block
+            out_parts.append(line[i:bl])
+            i = bl + 2
+            in_block = True
+            continue
+
+        # No more comments
+        out_parts.append(line[i:])
+        break
+
+    return ("".join(out_parts), in_block)
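Since the comment stripper carries block-comment state from line to line, a quick sketch of its contract (an illustration, not part of the patch):

    >>> strip_comments("x = PyList_GET_ITEM(a, 0); // why", False)
    ('x = PyList_GET_ITEM(a, 0); ', False)
    >>> strip_comments("foo(); /* begin", False)
    ('foo(); ', True)
    >>> strip_comments("still inside */ bar();", True)
    (' bar();', False)

The caller threads the returned boolean back in on the next line, so multi-line /* ... */ comments never produce hits.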
+
+
+def iter_source_files(root: Path, exts: set[str], excludes: set[str]) -> list[Path]:
+    """
+    Return a list of source files under 'root', where filenames end with any of the
+    extensions in 'exts' (e.g., '.c.src', '.c', '.h').
+    Excludes directories whose names are in 'excludes'.
+    """
+    results: list[Path] = []
+
+    for dirpath, dirnames, filenames in os.walk(root):
+        # Prune excluded directories
+        dirnames[:] = [d for d in dirnames if d not in excludes]
+        for fn in filenames:
+            # endswith handles multi-suffix patterns, e.g., .c.src
+            if any(fn.endswith(ext) for ext in exts):
+                results.append(Path(dirpath) / fn)
+    return results
+
+
+def build_func_rx(funcs: tuple[str, ...]) -> Pattern[str]:
+    # Match the function names only as whole identifiers
+    return re.compile(r"(?<!\w)(" + "|".join(funcs) + r")(?!\w)")
+
+
+def scan_file(
+    path: Path, func_rx: Pattern[str], noqa_markers: tuple[str, ...]
+) -> list[tuple[str, int, str, str]]:
+    """
+    Scan a single file.
+    Returns list of (func_name, line_number, path_str, raw_line_str).
+    """
+    hits: list[tuple[str, int, str, str]] = []
+    in_block = False
+    noqa_set = set(noqa_markers)
+
+    try:
+        with path.open("r", encoding="utf-8", errors="ignore") as f:
+            for lineno, raw in enumerate(f, 1):
+                # Skip if approved by noqa markers
+                if any(mark in raw for mark in noqa_set):
+                    continue
+
+                # Remove comments; if nothing remains, skip
+                code, in_block = strip_comments(raw.rstrip("\n"), in_block)
+                if not code.strip():
+                    continue
+
+                # Find all suspicious calls in non-comment code
+                for m in func_rx.finditer(code):
+                    hits.append((m.group(0), lineno, str(path), raw.rstrip("\n")))
+    except FileNotFoundError:
+        # File may have disappeared; ignore gracefully
+        pass
+    return hits
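Assuming the whole-identifier lookaround pattern used in build_func_rx above, calls are flagged only when the name is not embedded in a longer identifier (illustration, not part of the patch):

    >>> rx = build_func_rx(("PyDict_GetItem", "PyList_GET_ITEM"))
    >>> bool(rx.search("item = PyDict_GetItem(d, k);"))
    True
    >>> bool(rx.search("item = PyDict_GetItemRef(d, k, &v);"))
    False

scan_file() then records one hit per match, together with the line number and the raw line, for the report below.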
+
+
+def main(argv: list[str] | None = None) -> int:
+    # List of suspicious function calls:
+    suspicious_funcs: tuple[str, ...] = (
+        "PyList_GetItem",
+        "PyDict_GetItem",
+        "PyDict_GetItemWithError",
+        "PyDict_GetItemString",
+        "PyDict_SetDefault",
+        "PyDict_Next",
+        "PyWeakref_GetObject",
+        "PyWeakref_GET_OBJECT",
+        "PyList_GET_ITEM",
+        "_PyDict_GetItemStringWithError",
+        "PySequence_Fast",
+    )
+    func_rx = build_func_rx(suspicious_funcs)
+    noqa_markers = (
+        "noqa: borrowed-ref OK",
+        "noqa: borrowed-ref - manual fix needed",
+    )
+    default_exts = {".c", ".h", ".c.src", ".cpp"}
+    default_excludes = {"pythoncapi-compat"}
+
+    ap = argparse.ArgumentParser(description="Borrow-ref C API linter (Python).")
+    ap.add_argument(
+        "--quiet",
+        action="store_true",
+        help="Suppress normal output; exit status alone indicates result "
+        "(useful for CI).",
+    )
+    ap.add_argument(
+        "-j", "--jobs",
+        type=int,
+        default=0,
+        help="Number of worker threads (0=auto, 1=sequential).",
+    )
+    ap.add_argument(
+        "--root",
+        default="numpy",
+        type=str,
+        help="Root directory to scan (default: numpy)",
+    )
+    ap.add_argument(
+        "--ext",
+        action="append",
+        default=None,
+        help=f"File extension(s) to include (repeatable). Defaults to {default_exts}",
+    )
+    ap.add_argument(
+        "--exclude",
+        action="append",
+        default=None,
+        help=f"Directory name(s) to exclude (repeatable). Default: {default_excludes}",
+    )
+    args = ap.parse_args(argv)
+
+    if args.ext:
+        exts = {e if e.startswith(".") else f".{e}" for e in args.ext}
+    else:
+        exts = set(default_exts)
+    excludes = set(args.exclude) if args.exclude else set(default_excludes)
+
+    root = Path(args.root)
+    if not root.exists():
+        print(f"error: root '{root}' does not exist", file=sys.stderr)
+        return 2
+
+    files = sorted(iter_source_files(root, exts, excludes), key=str)
+
+    # Determine concurrency: auto picks a reasonable cap for I/O-bound work
+    if args.jobs is None or args.jobs <= 0:
+        max_workers = min(32, (os.cpu_count() or 1) * 5)
+    else:
+        max_workers = max(1, args.jobs)
+    print(f"Scanning {len(files)} C/C++ source files...\n")
+
+    # Report file location (mirrors the original shell script's behavior)
+    tmpdir = Path(".tmp")
+    tmpdir.mkdir(exist_ok=True)
+
+    findings = 0
+
+    # Run the scanning in parallel; only the main thread writes the report
+    all_hits: list[tuple[str, int, str, str]] = []
+    if max_workers == 1:
+        for p in files:
+            all_hits.extend(scan_file(p, func_rx, noqa_markers))
+    else:
+        with ThreadPoolExecutor(max_workers=max_workers) as ex:
+            fut_to_file = {
+                ex.submit(scan_file, p, func_rx, noqa_markers): p for p in files
+            }
+            for fut in as_completed(fut_to_file):
+                try:
+                    all_hits.extend(fut.result())
+                except Exception as e:
+                    print(f"Failed to scan {fut_to_file[fut]}: {e}")
+
+    # Sort for deterministic output: by path, then line number
+    all_hits.sort(key=lambda t: (t[2], t[1]))
+
+    # No hits: the linter passed
+    if not all_hits:
+        if not args.quiet:
+            print("All checks passed! C API borrow-ref linter found no issues.\n")
+        return 0
+
+    # There are some linter failures: create a log file
+    with tempfile.NamedTemporaryFile(
+        prefix="c_api_usage_report.",
+        suffix=".txt",
+        dir=tmpdir,
+        mode="w+",
+        encoding="utf-8",
+        delete=False,
+    ) as out:
+        report_path = Path(out.name)
+        out.write("Running Suspicious C API usage report workflow...\n\n")
+        for func, lineno, pstr, raw in all_hits:
+            findings += 1
+            out.write(f"Found suspicious call to {func} in file: {pstr}\n")
+            out.write(f"  -> {pstr}:{lineno}: {raw}\n")
+            out.write("Recommendation:\n")
+            out.write(
+                "If this use is intentional and safe, add "
+                "'// noqa: borrowed-ref OK' on the same line "
+                "to silence this warning.\n"
+            )
+            out.write(
+                "Otherwise, consider replacing the call "
+                "with a thread-safe API function.\n\n"
+            )
+
+        out.flush()
+        if not args.quiet:
+            out.seek(0)
+            sys.stdout.write(out.read())
+            print(f"Report written to: {report_path}\n")
+            print("C API borrow-ref linter FAILED.")
+
+    return 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml
index 81a342f20e4e..977921d8236d 100644
--- a/tools/ci/cirrus_arm.yml
+++ b/tools/ci/cirrus_arm.yml
@@ -9,7 +9,7 @@ modified_clone: &MODIFIED_CLONE
     # it's a PR so clone the main branch then merge the changes from the PR
     git clone https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR
     git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR
-
+
     # CIRRUS_BASE_BRANCH will probably be `main` for the majority of the time
     # However, if you do a PR against a maintenance branch we will want to
     # merge the PR into the maintenance branch, not main
@@ -25,14 +25,14 @@ freebsd_test_task:
   use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true'
   compute_engine_instance:
     image_project: freebsd-org-cloud-dev
-    image: family/freebsd-14-2
+    image: family/freebsd-14-3
    platform: freebsd
    cpu: 1
    memory: 4G
install_devtools_script: | pkg install -y git bash ninja ccache blas cblas lapack pkgconf - pkg install -y python311 + pkg install -y python312 <<: *MODIFIED_CLONE @@ -45,22 +45,22 @@ freebsd_test_task: prepare_env_script: | # Create a venv (the `source` command needs bash, not the default sh shell) chsh -s /usr/local/bin/bash - python3.11 -m venv .venv + python3.12 -m venv .venv source .venv/bin/activate # Minimal build and test requirements - python3.11 -m pip install -U pip - python3.11 -m pip install meson-python Cython pytest hypothesis + python3.12 -m pip install -U pip + python3.12 -m pip install meson-python Cython pytest hypothesis build_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate - python3.11 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" + python3.12 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" test_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate cd tools - python3.11 -m pytest --pyargs numpy -m "not slow" + python3.12 -m pytest --pyargs numpy -m "not slow" ccache -s on_failure: diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml deleted file mode 100644 index b531e953daee..000000000000 --- a/tools/ci/cirrus_wheels.yml +++ /dev/null @@ -1,118 +0,0 @@ -###################################################################### -# Build macosx_arm64 natively -# -# macosx_arm64 for macos >= 14 used to be built here, but are now -# built on GHA. -###################################################################### - -macosx_arm64_task: - use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' - env: - CIRRUS_CLONE_SUBMODULES: true - macos_instance: - matrix: - image: ghcr.io/cirruslabs/macos-monterey-xcode - - matrix: - - env: - CIBW_BUILD: cp311-* cp312* cp313* - env: - PATH: /usr/local/lib:/usr/local/include:$PATH - CIBW_ARCHS: arm64 - - build_script: | - brew install micromamba gfortran - micromamba shell init -s bash --root-prefix ~/micromamba - source ~/.bash_profile - - micromamba create -n numpydev - micromamba activate numpydev - micromamba install -y -c conda-forge python=3.11 2>/dev/null - - # Use scipy-openblas wheels - export INSTALL_OPENBLAS=true - export CIBW_ENVIRONMENT_MACOS="MACOSX_DEPLOYMENT_TARGET='11.0' INSTALL_OPENBLAS=true RUNNER_OS=macOS PKG_CONFIG_PATH=$PWD/.openblas" - - # needed for submodules - git submodule update --init - # need to obtain all the tags so setup.py can determine FULLVERSION - git fetch origin - uname -m - python -c "import platform;print(platform.python_version());print(platform.system());print(platform.machine())" - clang --version - - python -m pip install cibuildwheel - cibuildwheel - - wheels_artifacts: - path: "wheelhouse/*" - -###################################################################### -# Upload all wheels -###################################################################### - -wheels_upload_task: - use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' - # Artifacts don't seem to be persistent from task to task. - # Rather than upload wheels at the end of each cibuildwheel run we do a - # final upload here. This is because a run may be on different OS for - # which bash, etc, may not be present. - depends_on: - - macosx_arm64 - compute_engine_instance: - image_project: cirrus-images - image: family/docker-builder - platform: linux - cpu: 1 - - env: - NUMPY_STAGING_UPLOAD_TOKEN: ENCRYPTED[!5a69522ae0c2af9edb2bc1cdfeaca6292fb3666d9ecd82dca0615921834a6ce3b702352835d8bde4ea2a9ed5ef8424ac!] 
- NUMPY_NIGHTLY_UPLOAD_TOKEN: ENCRYPTED[4376691390321cd5e76613ec21de8456cc0af0164971dd9542f985a017dc30ccb4d40e60f59184618e2d55afd63e93b7] - - upload_script: | - apt-get update - apt-get install -y curl wget - export IS_SCHEDULE_DISPATCH="false" - export IS_PUSH="false" - - # cron job - if [[ "$CIRRUS_CRON" == "nightly" ]]; then - export IS_SCHEDULE_DISPATCH="true" - fi - - # a manual build was started - if [[ "$CIRRUS_BUILD_SOURCE" == "api" && "$CIRRUS_COMMIT_MESSAGE" == "API build for null" ]]; then - export IS_SCHEDULE_DISPATCH="true" - fi - - # only upload wheels to staging if it's a tag beginning with 'v' and you're - # on a maintenance branch - if [[ "$CIRRUS_TAG" == v* ]] && [[ $CIRRUS_TAG != *"dev0"* ]]; then - export IS_PUSH="true" - fi - - if [[ $IS_PUSH == "true" ]] || [[ $IS_SCHEDULE_DISPATCH == "true" ]]; then - # install miniconda in the home directory. For some reason HOME isn't set by Cirrus - export HOME=$PWD - - # install miniconda for uploading to anaconda - wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda3 - $HOME/miniconda3/bin/conda init bash - source $HOME/miniconda3/bin/activate - conda install -y anaconda-client - - # The name of the zip file is derived from the `wheels_artifact` line. - # If you change the artifact line to `myfile_artifact` then it would be - # called myfile.zip - - curl https://api.cirrus-ci.com/v1/artifact/build/$CIRRUS_BUILD_ID/wheels.zip --output wheels.zip - unzip wheels.zip - - source ./tools/wheels/upload_wheels.sh - # IS_PUSH takes precedence over IS_SCHEDULE_DISPATCH - set_upload_vars - - # Will be skipped if not a push/tag/scheduled build - upload_wheels - fi diff --git a/tools/ci/run_32_bit_linux_docker.sh b/tools/ci/run_32_bit_linux_docker.sh deleted file mode 100644 index bb0aedf88fcf..000000000000 --- a/tools/ci/run_32_bit_linux_docker.sh +++ /dev/null @@ -1,14 +0,0 @@ -set -xe - -git config --global --add safe.directory /numpy -cd /numpy -/opt/python/cp311-cp311/bin/python -mvenv venv -source venv/bin/activate -pip install -r requirements/ci32_requirements.txt -python3 -m pip install -r requirements/test_requirements.txt -echo CFLAGS \$CFLAGS -spin config-openblas --with-scipy-openblas=32 -export PKG_CONFIG_PATH=/numpy/.openblas -python3 -m pip install . 
-cd tools
-python3 -m pytest --pyargs numpy
diff --git a/tools/ci/ubsan_suppressions_arm64.txt b/tools/ci/ubsan_suppressions_arm64.txt
new file mode 100644
index 000000000000..69de4a4c425f
--- /dev/null
+++ b/tools/ci/ubsan_suppressions_arm64.txt
@@ -0,0 +1,51 @@
+# This file contains suppressions for the default (with GIL) build to prevent runtime errors
+# when numpy is built with -Db_sanitize=undefined for the arm64 architecture
+#
+# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks
+
+# Per this prior discussion, integer overflow is not a concern
+# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181
+signed-integer-overflow:*
+
+# all alignment runtime errors ignored in favor of this being tracked via TypeSanitizer
+# otherwise ubsan may detect system file alignment errors outside numpy
+alignment:*
+
+# suggested fix for runtime error: use INT_MIN constant
+shift-base:_core/src/umath/_rational_tests.c
+# suggested fix for runtime error: check for overflow if signed
+shift-base:_core/src/npymath/npy_math_internal.h
+
+# suggested fix for runtime error: null check before loop
+pointer-overflow:_core/src/common/simd/neon/memory.h
+pointer-overflow:_core/src/multiarray/datetime_busdaycal.c
+pointer-overflow:_core/src/multiarray/nditer_templ.c
+pointer-overflow:_core/src/multiarray/nditer_constr.c
+pointer-overflow:_core/src/umath/loops_arithm_fp.dispatch.c.src
+pointer-overflow:_core/src/umath/loops_unary.dispatch.c.src
+pointer-overflow:_core/src/umath/loops_unary_complex.dispatch.c.src
+pointer-overflow:_core/src/umath/loops_unary_fp_le.dispatch.c.src
+pointer-overflow:_core/src/umath/string_buffer.h
+pointer-overflow:linalg/umath_linalg.cpp
+pointer-overflow:numpy/random/bit_generator.pyx.c
+
+float-cast-overflow:_core/src/multiarray/lowlevel_strided_loops.c.src
+
+# flagged in CI - call to function through pointer to incorrect function type
+# Many functions in the modules/files listed below cause undefined behavior in CI
+# generally disable this check until further investigation, but keep the specific files
+# as a starting point for resolving the checks later
+function:_core/src/*
+function:numpy/random/*
+# function:_core/src/common/cblasfunc.c
+# function:_core/src/common/npy_argparse.c
+# function:_core/src/multiarray/number.c
+# function:_core/src/multiarray/ctors.c
+# function:_core/src/multiarray/convert_datatype.c
+# function:_core/src/multiarray/dtype_transfer.c
+# function:_core/src/multiarray/dtype_traversal.c
+# function:_core/src/multiarray/getset.c
+# function:_core/src/multiarray/scalarapi.c
+# function:_core/src/multiarray/scalartypes.c.src
+# function:_core/src/umath/*
+# function:numpy/random/*
diff --git a/tools/ci/ubsan_suppressions_x86_64.txt b/tools/ci/ubsan_suppressions_x86_64.txt
new file mode 100644
index 000000000000..5e4316ce3715
--- /dev/null
+++ b/tools/ci/ubsan_suppressions_x86_64.txt
@@ -0,0 +1,28 @@
+# This file contains suppressions for the default (with GIL) build to prevent runtime errors
+# when numpy is built with -Db_sanitize=undefined for the x86_64 architecture
+#
+# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks
+
+# Per this prior discussion, integer overflow is not a concern
+# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181
+signed-integer-overflow:*
+
+# all alignment runtime errors ignored in favor of this being tracked via TypeSanitizer
+# otherwise ubsan may detect system file alignment errors outside numpy
+alignment:*
+
+# suggested fix for runtime error: use INT_MIN constant
+shift-base:_core/src/umath/_rational_tests.c
+# suggested fix for runtime error: check for overflow if signed
+shift-base:_core/src/npymath/npy_math_internal.h
+
+
+# suggested fix for runtime error: check that pointer is not null before calling function
+nonnull-attribute:_core/src/multiarray/array_coercion.c
+nonnull-attribute:_core/src/multiarray/ctors.c
+nonnull-attribute:_core/src/multiarray/datetime_busdaycal.c
+nonnull-attribute:_core/src/multiarray/scalarapi.c
+nonnull-attribute:_core/src/multiarray/calculation.c
+
+# suggested fix for runtime error: null check before loop
+pointer-overflow:_core/src/multiarray/nditer_templ.c
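Both files use the UBSan runtime suppression format, one "check:pattern" entry per line. As a minimal sketch of how a CI step could point the sanitizer runtime at the matching file (an assumption about the wiring; the actual workflow steps may differ):

    import os
    import platform
    import subprocess

    # Hypothetical helper: pick the per-architecture suppression file added above.
    arch = "arm64" if platform.machine() in ("arm64", "aarch64") else "x86_64"
    env = dict(os.environ)
    env["UBSAN_OPTIONS"] = (
        f"suppressions=tools/ci/ubsan_suppressions_{arch}.txt:halt_on_error=1"
    )
    # Run the test suite against the instrumented (-Db_sanitize=undefined) build.
    subprocess.run(
        ["python", "-m", "pytest", "--pyargs", "numpy", "-m", "not slow"],
        env=env,
        check=True,
    )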
diff --git a/tools/get_submodule_paths.py b/tools/get_submodule_paths.py
new file mode 100644
index 000000000000..abab86140712
--- /dev/null
+++ b/tools/get_submodule_paths.py
@@ -0,0 +1,31 @@
+import glob
+import os.path
+
+
+def get_submodule_paths():
+    '''
+    Get paths to submodules so that we can exclude them from things like
+    check_test_name.py, check_unicode.py, etc.
+    '''
+    root_directory = os.path.dirname(os.path.dirname(__file__))
+    gitmodule_file = os.path.join(root_directory, '.gitmodules')
+    with open(gitmodule_file) as gitmodules:
+        data = gitmodules.read().split('\n')
+    submodule_paths = [datum.split(' = ')[1] for datum in data if
+                       datum.startswith('\tpath = ')]
+    submodule_paths = [os.path.join(root_directory, path) for path in
+                       submodule_paths]
+    # vendored with a script rather than via gitmodules
+    with open(
+        os.path.join(root_directory, ".gitattributes"), "r"
+    ) as attr_file:
+        for line in attr_file:
+            if "vendored" in line:
+                pattern = line.split(" ", 1)[0]
+                submodule_paths.extend(glob.glob(pattern))
+
+    return submodule_paths
+
+
+if __name__ == "__main__":
+    print('\n'.join(get_submodule_paths()))
diff --git a/tools/linter.py b/tools/linter.py
index 1ce9ca763343..4e9aed85054a 100644
--- a/tools/linter.py
+++ b/tools/linter.py
@@ -18,6 +18,7 @@ def run_ruff(self, fix: bool) -> tuple[int, str]:
         Unlike pycodestyle, ruff by itself is not capable of limiting
         its output to the given diff.
""" + print("Running Ruff Check...") command = ["ruff", "check"] if fix: command.append("--fix") @@ -30,13 +31,58 @@ def run_ruff(self, fix: bool) -> tuple[int, str]: ) return res.returncode, res.stdout + def run_cython_lint(self) -> tuple[int, str]: + print("Running cython-lint...") + command = ["cython-lint", "--no-pycodestyle", "numpy"] + + res = subprocess.run( + command, + stdout=subprocess.PIPE, + cwd=self.repository_root, + encoding="utf-8", + ) + return res.returncode, res.stdout + def run_lint(self, fix: bool) -> None: - retcode, errors = self.run_ruff(fix) - errors and print(errors) + # Ruff Linter + retcode, ruff_errors = self.run_ruff(fix) + ruff_errors and print(ruff_errors) + + if retcode: + sys.exit(retcode) + + # C API Borrowed-ref Linter + retcode, c_API_errors = self.run_check_c_api() + c_API_errors and print(c_API_errors) + + if retcode: + sys.exit(retcode) + + # Cython Linter + retcode, cython_errors = self.run_cython_lint() + cython_errors and print(cython_errors) sys.exit(retcode) + def run_check_c_api(self) -> tuple[int, str]: + """Run C-API borrowed-ref checker""" + print("Running C API borrow-reference linter...") + borrowed_ref_script = os.path.join( + self.repository_root, "tools", "ci", "check_c_api_usage.py" + ) + borrowed_res = subprocess.run( + [sys.executable, borrowed_ref_script], + cwd=self.repository_root, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + check=False, + ) + + # Exit with non-zero if C API Check fails + return borrowed_res.returncode, borrowed_res.stdout + if __name__ == "__main__": parser = ArgumentParser() diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt new file mode 100644 index 000000000000..58110d66046e --- /dev/null +++ b/tools/stubtest/allowlist.txt @@ -0,0 +1,198 @@ +# intentional type-check-only deviations from runtime +numpy\._typing.* +numpy\.polynomial\._polytypes + +# internal testing code +numpy\.conftest.* +numpy\.random\._generator\.__test__ +numpy(\.\w+)?\.tests.* + +# system-dependent extended precision types +numpy(\..+)?\.float(96|128) +numpy(\..+)?\.complex(192|256) + +# system-dependent SIMD constants +numpy\._core\._simd\.\w+ + +# these are always either float96/complex192 or float128/complex256 +numpy\.__all__ +numpy\._?core\.__all__ +numpy\._?core\.numeric\.__all__ +numpy\._?core\.numerictypes\.__all__ +numpy\.matlib\.__all__ + +# requires numpy/_core/code_generators to be on the PYTHONPATH when running stubtest +numpy\._core\.cversions + +# raises SystemExit on import +numpy\.f2py\.__main__ + +# inexpressible: the `dtype.type` class-attribute is `None` unless instantiated +numpy(\..+)?\.dtype\.type + +# import errors +numpy\._pyinstaller\..* + +# known minor deviations from runtime +numpy\.(\w+\.)*integer\.__index__ +numpy\.(\w+\.)*integer\.bit_count +numpy\.(\w+\.)*floating\.as_integer_ratio +numpy\.(\w+\.)*floating\.is_integer +numpy\.(\w+\.)*complexfloating\.__complex__ + +# intentionally missing deprecated module stubs +numpy\.core\._dtype +numpy\.core\._dtype_ctypes +numpy\.core\._internal +numpy\.core\._multiarray_umath.* +numpy\.core\.arrayprint.* +numpy\.core\.defchararray.* +numpy\.core\.einsumfunc.* +numpy\.core\.fromnumeric.* +numpy\.core\.function_base.* +numpy\.core\.getlimits.* +numpy\.core\.multiarray.* +numpy\.core\.numeric.* +numpy\.core\.overrides +numpy\.core\.records.* +numpy\.core\.shape_base.* +numpy\.core\.umath.* +numpy\.typing\.mypy_plugin + +# false positive "... 
is not a Union" errors +numpy\.typing\.ArrayLike +numpy\.typing\.DTypeLike +numpy\.typing\.NDArray + +# ufuncs, see https://github.com/python/mypy/issues/20223 +numpy\.(\w+\.)*abs +numpy\.(\w+\.)*absolute +numpy\.(\w+\.)*acos +numpy\.(\w+\.)*acosh +numpy\.(\w+\.)*add +numpy\.(\w+\.)*arccos +numpy\.(\w+\.)*arccosh +numpy\.(\w+\.)*arcsin +numpy\.(\w+\.)*arcsinh +numpy\.(\w+\.)*arctan +numpy\.(\w+\.)*arctan2 +numpy\.(\w+\.)*arctanh +numpy\.(\w+\.)*asin +numpy\.(\w+\.)*asinh +numpy\.(\w+\.)*atan +numpy\.(\w+\.)*atan2 +numpy\.(\w+\.)*atanh +numpy\.(\w+\.)*bitwise_and +numpy\.(\w+\.)*bitwise_count +numpy\.(\w+\.)*bitwise_invert +numpy\.(\w+\.)*bitwise_left_shift +numpy\.(\w+\.)*bitwise_not +numpy\.(\w+\.)*bitwise_or +numpy\.(\w+\.)*bitwise_right_shift +numpy\.(\w+\.)*bitwise_xor +numpy\.(\w+\.)*cbrt +numpy\.(\w+\.)*ceil +numpy\.(\w+\.)*conj +numpy\.(\w+\.)*conjugate +numpy\.(\w+\.)*copysign +numpy\.(\w+\.)*cos +numpy\.(\w+\.)*cosh +numpy\.(\w+\.)*deg2rad +numpy\.(\w+\.)*degrees +numpy\.(\w+\.)*divide +numpy\.(\w+\.)*divmod +numpy\.(\w+\.)*equal +numpy\.(\w+\.)*exp +numpy\.(\w+\.)*exp2 +numpy\.(\w+\.)*expm1 +numpy\.(\w+\.)*fabs +numpy\.(\w+\.)*float_power +numpy\.(\w+\.)*floor +numpy\.(\w+\.)*floor_divide +numpy\.(\w+\.)*fmax +numpy\.(\w+\.)*fmin +numpy\.(\w+\.)*fmod +numpy\.(\w+\.)*frexp +numpy\.(\w+\.)*gcd +numpy\.(\w+\.)*greater +numpy\.(\w+\.)*greater_equal +numpy\.(\w+\.)*heaviside +numpy\.(\w+\.)*hypot +numpy\.(\w+\.)*invert +numpy\.(\w+\.)*isfinite +numpy\.(\w+\.)*isinf +numpy\.(\w+\.)*isnan +numpy\.(\w+\.)*isnat +numpy\.(\w+\.)*lcm +numpy\.(\w+\.)*ldexp +numpy\.(\w+\.)*left_shift +numpy\.(\w+\.)*less +numpy\.(\w+\.)*less_equal +numpy\.(\w+\.)*log +numpy\.(\w+\.)*log10 +numpy\.(\w+\.)*log1p +numpy\.(\w+\.)*log2 +numpy\.(\w+\.)*logaddexp +numpy\.(\w+\.)*logaddexp2 +numpy\.(\w+\.)*logical_and +numpy\.(\w+\.)*logical_not +numpy\.(\w+\.)*logical_or +numpy\.(\w+\.)*logical_xor +numpy\.(\w+\.)*matmul +numpy\.(\w+\.)*matvec +numpy\.(\w+\.)*maximum +numpy\.(\w+\.)*minimum +numpy\.(\w+\.)*mod +numpy\.(\w+\.)*modf +numpy\.(\w+\.)*multiply +numpy\.(\w+\.)*negative +numpy\.(\w+\.)*nextafter +numpy\.(\w+\.)*not_equal +numpy\.(\w+\.)*positive +numpy\.(\w+\.)*pow +numpy\.(\w+\.)*power +numpy\.(\w+\.)*rad2deg +numpy\.(\w+\.)*radians +numpy\.(\w+\.)*reciprocal +numpy\.(\w+\.)*remainder +numpy\.(\w+\.)*right_shift +numpy\.(\w+\.)*rint +numpy\.(\w+\.)*sign +numpy\.(\w+\.)*signbit +numpy\.(\w+\.)*sin +numpy\.(\w+\.)*sinh +numpy\.(\w+\.)*spacing +numpy\.(\w+\.)*sqrt +numpy\.(\w+\.)*square +numpy\.(\w+\.)*subtract +numpy\.(\w+\.)*tan +numpy\.(\w+\.)*tanh +numpy\.(\w+\.)*true_divide +numpy\.(\w+\.)*trunc +numpy\.(\w+\.)*vecdot +numpy\.(\w+\.)*vecmat +numpy\.(\w+\.)*isalnum +numpy\.(\w+\.)*isalpha +numpy\.(\w+\.)*isdecimal +numpy\.(\w+\.)*isdigit +numpy\.(\w+\.)*islower +numpy\.(\w+\.)*isnumeric +numpy\.(\w+\.)*isspace +numpy\.(\w+\.)*istitle +numpy\.(\w+\.)*isupper +numpy\.(\w+\.)*str_len +numpy\._core\._methods\.umr_bitwise_count +numpy\._core\._umath_tests\.always_error +numpy\._core\._umath_tests\.always_error_gufunc +numpy\._core\._umath_tests\.always_error_unary +numpy\._core\._umath_tests\.conv1d_full +numpy\._core\._umath_tests\.cross1d +numpy\._core\._umath_tests\.euclidean_pdist +numpy\._core\._umath_tests\.indexed_negative +numpy\._core\._umath_tests\.inner1d +numpy\._core\._umath_tests\.inner1d_no_doc +numpy\._core\._umath_tests\.matrix_multiply +numpy\.linalg\._umath_linalg\.qr_complete +numpy\.linalg\._umath_linalg\.qr_reduced +numpy\.linalg\._umath_linalg\.solve 
+numpy\.linalg\._umath_linalg\.solve1 diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini new file mode 100644 index 000000000000..4c75171acffe --- /dev/null +++ b/tools/stubtest/mypy.ini @@ -0,0 +1,24 @@ +[mypy] +files = numpy/**/*.pyi +exclude = (?x)( + ^numpy/( + .+\.py$ + | _build_utils/ + | _core/code_generators/ + ) + ) +namespace_packages = False + +enable_error_code = deprecated, ignore-without-code, redundant-expr, truthy-bool +warn_unreachable = False +strict = True +strict_bytes = True +allow_redefinition_new = True +local_partial_types = True + +; Stubtest requires mypy to pass before running, which would currently fail +; on numerous stubs. To allow running stubtest independently, we temporarily +; ignore these errors here. The goal is to eventually fix these mypy errors +; and remove the ignores. +; See also https://github.com/numpy/numpy/issues/27032 +disable_error_code = overload-overlap diff --git a/tools/swig/test/testArray.py b/tools/swig/test/testArray.py index a8528207c167..b9b4af3319ae 100755 --- a/tools/swig/test/testArray.py +++ b/tools/swig/test/testArray.py @@ -378,9 +378,9 @@ def testView(self): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite(Array1TestCase)) - suite.addTest(unittest.makeSuite(Array2TestCase)) - suite.addTest(unittest.makeSuite(ArrayZTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array1TestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array2TestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ArrayZTestCase)) # Execute the test suite print("Testing Classes of Module Array") diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py index a9310e20a897..3798029dbe4b 100755 --- a/tools/swig/test/testFarray.py +++ b/tools/swig/test/testFarray.py @@ -2,6 +2,7 @@ import os import sys import unittest + from distutils.util import get_platform import numpy as np @@ -149,7 +150,7 @@ def testView(self): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite(FarrayTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(FarrayTestCase)) # Execute the test suite print("Testing Classes of Module Farray") diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py index ce6f74819e86..43ed84bcfa06 100755 --- a/tools/swig/test/testFlat.py +++ b/tools/swig/test/testFlat.py @@ -43,7 +43,7 @@ def testProcess3D(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) y = x.copy() process(y) self.assertEqual(np.all((x + 1) == y), True) @@ -56,7 +56,7 @@ def testProcess3DTranspose(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) y = x.copy() process(y.T) self.assertEqual(np.all((x.T + 1) == y.T), True) @@ -69,7 +69,7 @@ def testProcessNoncontiguous(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) self.assertRaises(TypeError, process, x[:, :, 0]) @@ -176,18 +176,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - 
suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 1D Functions of Module Flat") diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py index 498732f3118f..8b23af610481 100644 --- a/tools/swig/test/testFortran.py +++ b/tools/swig/test/testFortran.py @@ -140,18 +140,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 2D Functions of Module Matrix") diff --git a/tools/swig/test/testMatrix.py b/tools/swig/test/testMatrix.py index d20312ecc2a0..d3151a0fb857 100755 --- a/tools/swig/test/testMatrix.py 
+++ b/tools/swig/test/testMatrix.py @@ -339,18 +339,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 2D Functions of Module Matrix") diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py index e0027428e647..f49a0aa07a90 100644 --- a/tools/swig/test/testSuperTensor.py +++ b/tools/swig/test/testSuperTensor.py @@ -374,18 +374,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + 
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 4D Functions of Module SuperTensor") diff --git a/tools/swig/test/testTensor.py b/tools/swig/test/testTensor.py index aa962b0cbcda..536d848e6135 100755 --- a/tools/swig/test/testTensor.py +++ b/tools/swig/test/testTensor.py @@ -99,7 +99,7 @@ def testMaxWrongDim(self): "Test max function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) max = Tensor.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, [0, -1, 2, -3]) + self.assertRaises(TypeError, max, [0, 1, 2, 3]) # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap def testMin(self): @@ -379,18 +379,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 3D Functions of Module Tensor") diff --git a/tools/swig/test/testVector.py b/tools/swig/test/testVector.py index f0b51715d1d5..15ad96da4503 100755 --- a/tools/swig/test/testVector.py +++ b/tools/swig/test/testVector.py @@ -358,18 +358,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + 
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 1D Functions of Module Vector") diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt deleted file mode 100644 index 9e2d9053b8a7..000000000000 --- a/tools/wheels/LICENSE_linux.txt +++ /dev/null @@ -1,902 +0,0 @@ - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy.libs/libscipy_openblas*.so -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: numpy.libs/libscipy_openblas*.so -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. 
- - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: GCC runtime library -Files: numpy.libs/libgfortran*.so -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. - -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. - -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library. - -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. - -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. 
- - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. 
Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. 
For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. 
You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. 
- - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. 
- - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. 
- - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see <https://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - <program> Copyright (C) <year> <name of author> - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box".
- - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -<https://www.gnu.org/licenses/>. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -<https://www.gnu.org/licenses/why-not-lgpl.html>. - -Name: libquadmath -Files: numpy.libs/libquadmath*.so -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath -License: LGPL-2.1-or-later - - GCC Quad-Precision Math Library - Copyright (C) 2010-2019 Free Software Foundation, Inc. - Written by Francois-Xavier Coudert <fxcoudert@gcc.gnu.org> - - This file is part of the libquadmath library. - Libquadmath is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - Libquadmath is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt deleted file mode 100644 index 7ef2e381874e..000000000000 --- a/tools/wheels/LICENSE_osx.txt +++ /dev/null @@ -1,902 +0,0 @@ - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy/.dylibs/libscipy_openblas*.so -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: numpy/.dylibs/libscipy_openblas*.so -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. - - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: GCC runtime library -Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc* -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. 
- - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - <http://www.gnu.org/licenses/>. - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/> - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. - -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. - -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library. - -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. - -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC.
For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software.
The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. 
- - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. 
- - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
-
-Name: libquadmath
-Files: numpy/.dylibs/libquadmath*.so
-Description: dynamically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
-License: LGPL-2.1-or-later
-
-    GCC Quad-Precision Math Library
-    Copyright (C) 2010-2019 Free Software Foundation, Inc.
-    Written by Francois-Xavier Coudert  <fxcoudert@gcc.gnu.org>
-
-    This file is part of the libquadmath library.
-    Libquadmath is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    Libquadmath is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
- https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt deleted file mode 100644 index c8277e7710a2..000000000000 --- a/tools/wheels/LICENSE_win32.txt +++ /dev/null @@ -1,881 +0,0 @@ - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy.libs\libscipy_openblas*.dll -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: numpy.libs\libscipy_openblas*.dll -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. - - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. 
The copyright holders
-  disclaim any liability to any recipient for claims brought against
-  recipient by any third party for infringement of that parties
-  intellectual property rights.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Name: GCC runtime library
-Files: numpy.libs\libscipy_openblas*.dll
-Description: statically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
-License: GPL-3.0-with-GCC-exception
-  Copyright (C) 2002-2017 Free Software Foundation, Inc.
-
-  Libgfortran is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3, or (at your option)
-  any later version.
-
-  Libgfortran is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  Under Section 7 of GPL version 3, you are granted additional
-  permissions described in the GCC Runtime Library Exception, version
-  3.1, as published by the Free Software Foundation.
-
-  You should have received a copy of the GNU General Public License and
-  a copy of the GCC Runtime Library Exception along with this program;
-  see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
-  <http://www.gnu.org/licenses/>.
-
-----
-
-Full text of license texts referred to above follows (that they are
-listed below does not necessarily imply the conditions apply to the
-present binary release):
-
-----
-
-GCC RUNTIME LIBRARY EXCEPTION
-
-Version 3.1, 31 March 2009
-
-Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-This GCC Runtime Library Exception ("Exception") is an additional
-permission under section 7 of the GNU General Public License, version
-3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
-bears a notice placed by the copyright holder of the file stating that
-the file is governed by GPLv3 along with this Exception.
-
-When you use GCC to compile a program, GCC may combine portions of
-certain GCC header files and runtime libraries with the compiled
-program. The purpose of this Exception is to allow compilation of
-non-GPL (including proprietary) programs to use, in this way, the
-header files and runtime libraries covered by this Exception.
-
-0. Definitions.
-
-A file is an "Independent Module" if it either requires the Runtime
-Library for execution after a Compilation Process, or makes use of an
-interface provided by the Runtime Library, but is not otherwise based
-on the Runtime Library.
- -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. - -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. 
- - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. 
- - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. 
- - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. 
- - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. 
- - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. 
(Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. 
- - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Use with the GNU Affero General Public License.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
-
diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py
deleted file mode 100644
index 572295b4ca2f..000000000000
--- a/tools/wheels/check_license.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-"""
-check_license.py [MODULE]
-
-Check the presence of a LICENSE.txt in the installed module directory,
-and that it appears to contain text prevalent for a NumPy binary
-distribution.
- -""" -import argparse -import pathlib -import re -import sys - - -def check_text(text): - ok = "Copyright (c)" in text and re.search( - r"This binary distribution of \w+ also bundles the following software", - text, - ) - return ok - - -def main(): - p = argparse.ArgumentParser(usage=__doc__.rstrip()) - p.add_argument("module", nargs="?", default="numpy") - args = p.parse_args() - - # Drop '' from sys.path - sys.path.pop(0) - - # Find module path - __import__(args.module) - mod = sys.modules[args.module] - - # LICENSE.txt is installed in the .dist-info directory, so find it there - sitepkgs = pathlib.Path(mod.__file__).parent.parent - distinfo_path = next(iter(sitepkgs.glob("numpy-*.dist-info"))) - - # Check license text - license_txt = distinfo_path / "LICENSE.txt" - with open(license_txt, encoding="utf-8") as f: - text = f.read() - - ok = check_text(text) - if not ok: - print( - f"ERROR: License text {license_txt} does not contain expected " - "text fragments\n" - ) - print(text) - sys.exit(1) - - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 3e1d4498fe7c..381c329a5372 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -2,55 +2,51 @@ set -xe PROJECT_DIR="${1:-$PWD}" - # remove any cruft from a previous run rm -rf build -# Update license -echo "" >> $PROJECT_DIR/LICENSE.txt -echo "----" >> $PROJECT_DIR/LICENSE.txt -echo "" >> $PROJECT_DIR/LICENSE.txt -cat $PROJECT_DIR/LICENSES_bundled.txt >> $PROJECT_DIR/LICENSE.txt -if [[ $RUNNER_OS == "Linux" ]] ; then - cat $PROJECT_DIR/tools/wheels/LICENSE_linux.txt >> $PROJECT_DIR/LICENSE.txt -elif [[ $RUNNER_OS == "macOS" ]]; then - cat $PROJECT_DIR/tools/wheels/LICENSE_osx.txt >> $PROJECT_DIR/LICENSE.txt -elif [[ $RUNNER_OS == "Windows" ]]; then - cat $PROJECT_DIR/tools/wheels/LICENSE_win32.txt >> $PROJECT_DIR/LICENSE.txt -fi - if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then echo "No BLAS used for 32-bit wheels" export INSTALL_OPENBLAS=false -elif [[ $(python -c"import sysconfig; print(sysconfig.get_platform())") == "win-arm64" ]]; then - echo "No BLAS used for ARM64 wheels" - export INSTALL_OPENBLAS=false elif [ -z $INSTALL_OPENBLAS ]; then # the macos_arm64 build might not set this variable export INSTALL_OPENBLAS=true fi -# Install Openblas from scipy-openblas64 +# Install OpenBLAS from scipy-openblas32|64 if [[ "$INSTALL_OPENBLAS" = "true" ]] ; then - echo PKG_CONFIG_PATH $PKG_CONFIG_PATH - PKG_CONFIG_PATH=$PROJECT_DIR/.openblas - rm -rf $PKG_CONFIG_PATH - mkdir -p $PKG_CONFIG_PATH - python -m pip install -r requirements/ci_requirements.txt - python -c "import scipy_openblas64; print(scipy_openblas64.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc - # Copy the shared objects to a path under $PKG_CONFIG_PATH, the build - # will point $LD_LIBRARY_PATH there and then auditwheel/delocate-wheel will - # pull these into the wheel. Use python to avoid windows/posix problems - python < $pkgconf_path/scipy-openblas.pc + + # Copy scipy-openblas DLL's to a fixed location so we can point delvewheel + # at it in `repair_windows.sh` (needed only on Windows because of the lack + # of RPATH support). + if [[ $RUNNER_OS == "Windows" ]]; then + python < + +Example:: + + python tools/write_release.py 1.7.0 + +Needs to be run from the root of the repository and assumes +that the output is in `release` and wheels and sdist in +`release/installers`. 
diff --git a/tools/write_release.py b/tools/write_release.py
--- /dev/null
+++ b/tools/write_release.py
+
+Example::
+
+    python tools/write_release.py 1.7.0
+
+Needs to be run from the root of the repository and assumes
+that the output is in `release`, with wheels and sdist in
+`release/installers`.
+
+Translating from rst to markdown requires pandoc; you will
+need to rely on your distribution to provide it.
+
+"""
+import argparse
+import shutil
+import subprocess
+from pathlib import Path
+
+# Name of the notes directory
+NOTES_DIR = "doc/source/release"
+# Name of the output directory
+OUTPUT_DIR = "release"
+# Output base name, `.rst` or `.md` will be appended
+OUTPUT_FILE = "README"
+
+
+def write_release(version):
+    """
+    Copy the <version>-notes.rst file to the OUTPUT_DIR and use
+    pandoc to translate it to markdown. That results in both
+    README.rst and README.md files that can be used on
+    GitHub for the release.
+
+    Parameters
+    ----------
+    version : str
+        Release version, e.g., '2.3.2', etc.
+
+    Returns
+    -------
+    None.
+
+    """
+    notes = Path(NOTES_DIR) / f"{version}-notes.rst"
+    outdir = Path(OUTPUT_DIR)
+    outdir.mkdir(exist_ok=True)
+    target_md = outdir / f"{OUTPUT_FILE}.md"
+    target_rst = outdir / f"{OUTPUT_FILE}.rst"
+
+    # copy the notes to README.rst, then translate it to md
+    # for posting on GitHub
+    shutil.copyfile(notes, target_rst)
+    subprocess.run(
+        ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"],
+        check=True,
+    )
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "version",
+        help="NumPy version of the release, e.g. 2.3.2, etc.")
+
+    args = parser.parse_args()
+    write_release(args.version)
diff --git a/vendored-meson/meson b/vendored-meson/meson
index f754c4258805..e72c717199fa 160000
--- a/vendored-meson/meson
+++ b/vendored-meson/meson
@@ -1 +1 @@
-Subproject commit f754c4258805056ed7be09830d96af45215d341b
+Subproject commit e72c717199fa18d34020c7c97f9de3f388c5e055
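For reference, a hypothetical driver for the new tools/write_release.py above, assuming it is invoked from the repository root with doc/source/release/2.3.2-notes.rst present and pandoc installed (the version number is illustrative only)::

    # Hedged usage sketch: run the release-notes conversion script
    import subprocess

    subprocess.run(["python", "tools/write_release.py", "2.3.2"], check=True)
    # afterwards, release/README.rst and release/README.md should exist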